repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
edxzw/edx-platform | cms/djangoapps/contentstore/views/tests/test_library.py | 114 | 9392 | """
Unit tests for contentstore.views.library
More important high-level tests are in contentstore/tests/test_libraries.py
"""
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from contentstore.utils import reverse_course_url, reverse_library_url
from contentstore.views.component import get_component_templates
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import LibraryFactory
from mock import patch
from opaque_keys.edx.locator import CourseKey, LibraryLocator
import ddt
from student.roles import LibraryUserRole
LIBRARY_REST_URL = '/library/'  # Base URL for GET/POST requests involving libraries


def make_url_for_lib(key):
    """Build the RESTful/studio URL used by these tests for *key*.

    Accepts either a LibraryLocator instance or an already-serialized
    library key string.
    """
    key_text = unicode(key) if isinstance(key, LibraryLocator) else key
    return LIBRARY_REST_URL + key_text
@ddt.ddt
class UnitTestLibraries(ModuleStoreTestCase):
    """
    Unit tests for library views
    """

    def setUp(self):
        # ModuleStoreTestCase.setUp() creates self.user and returns that
        # user's password (edx test-case convention - see usage below).
        user_password = super(UnitTestLibraries, self).setUp()
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.user.username, password=user_password)

    ######################################################
    # Tests for /library/ - list and create libraries:

    @patch("contentstore.views.library.LIBRARIES_ENABLED", False)
    def test_with_libraries_disabled(self):
        """
        The library URLs should return 404 if libraries are disabled.
        """
        response = self.client.get_json(LIBRARY_REST_URL)
        self.assertEqual(response.status_code, 404)

    def test_list_libraries(self):
        """
        Test that we can GET /library/ to list all libraries visible to the current user.
        """
        # Create some more libraries
        libraries = [LibraryFactory.create() for _ in range(3)]
        lib_dict = dict([(lib.location.library_key, lib) for lib in libraries])
        response = self.client.get_json(LIBRARY_REST_URL)
        self.assertEqual(response.status_code, 200)
        lib_list = parse_json(response)
        self.assertEqual(len(lib_list), len(libraries))
        for entry in lib_list:
            self.assertIn("library_key", entry)
            self.assertIn("display_name", entry)
            key = CourseKey.from_string(entry["library_key"])
            self.assertIn(key, lib_dict)
            self.assertEqual(entry["display_name"], lib_dict[key].display_name)
            del lib_dict[key]  # To ensure no duplicates are matched

    @ddt.data("delete", "put")
    def test_bad_http_verb(self, verb):
        """
        We should get an error if we do weird requests to /library/
        """
        response = getattr(self.client, verb)(LIBRARY_REST_URL)
        self.assertEqual(response.status_code, 405)

    def test_create_library(self):
        """ Create a library. """
        response = self.client.ajax_post(LIBRARY_REST_URL, {
            'org': 'org',
            'library': 'lib',
            'display_name': "New Library",
        })
        self.assertEqual(response.status_code, 200)
        # That's all we check. More detailed tests are in contentstore.tests.test_libraries...

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
    def test_lib_create_permission(self):
        """
        Users who are not given course creator roles should still be able to
        create libraries.
        """
        self.client.logout()
        ns_user, password = self.create_non_staff_user()
        self.client.login(username=ns_user.username, password=password)
        response = self.client.ajax_post(LIBRARY_REST_URL, {
            'org': 'org', 'library': 'lib', 'display_name': "New Library",
        })
        self.assertEqual(response.status_code, 200)

    @ddt.data(
        {},
        {'org': 'org'},
        {'library': 'lib'},
        {'org': 'C++', 'library': 'lib', 'display_name': 'Lib with invalid characters in key'},
        {'org': 'Org', 'library': 'Wh@t?', 'display_name': 'Lib with invalid characters in key'},
    )
    def test_create_library_invalid(self, data):
        """
        Make sure we are prevented from creating libraries with invalid keys/data
        """
        response = self.client.ajax_post(LIBRARY_REST_URL, data)
        self.assertEqual(response.status_code, 400)

    def test_no_duplicate_libraries(self):
        """
        We should not be able to create multiple libraries with the same key
        """
        lib = LibraryFactory.create()
        lib_key = lib.location.library_key
        response = self.client.ajax_post(LIBRARY_REST_URL, {
            'org': lib_key.org,
            'library': lib_key.library,
            'display_name': "A Duplicate key, same as 'lib'",
        })
        self.assertIn('already a library defined', parse_json(response)['ErrMsg'])
        self.assertEqual(response.status_code, 400)

    ######################################################
    # Tests for /library/:lib_key/ - get a specific library as JSON or HTML editing view

    def test_get_lib_info(self):
        """
        Test that we can get data about a library (in JSON format) using /library/:key/
        """
        # Create a library
        lib_key = LibraryFactory.create().location.library_key
        # Re-load the library from the modulestore, explicitly including version information:
        lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False)
        version = lib.location.library_key.version_guid
        self.assertNotEqual(version, None)
        response = self.client.get_json(make_url_for_lib(lib_key))
        self.assertEqual(response.status_code, 200)
        info = parse_json(response)
        self.assertEqual(info['display_name'], lib.display_name)
        self.assertEqual(info['library_id'], unicode(lib_key))
        self.assertEqual(info['previous_version'], None)
        self.assertNotEqual(info['version'], None)
        self.assertNotEqual(info['version'], '')
        self.assertEqual(info['version'], unicode(version))

    def test_get_lib_edit_html(self):
        """
        Test that we can get the studio view for editing a library using /library/:key/
        """
        lib = LibraryFactory.create()
        response = self.client.get(make_url_for_lib(lib.location.library_key))
        self.assertEqual(response.status_code, 200)
        self.assertIn("<html", response.content)
        self.assertIn(lib.display_name, response.content)

    @ddt.data('library-v1:Nonexistent+library', 'course-v1:Org+Course', 'course-v1:Org+Course+Run', 'invalid')
    def test_invalid_keys(self, key_str):
        """
        Check that various Nonexistent/invalid keys give 404 errors
        """
        response = self.client.get_json(make_url_for_lib(key_str))
        self.assertEqual(response.status_code, 404)

    def test_bad_http_verb_with_lib_key(self):
        """
        We should get an error if we do weird requests to /library/
        """
        lib = LibraryFactory.create()
        for verb in ("post", "delete", "put"):
            response = getattr(self.client, verb)(make_url_for_lib(lib.location.library_key))
            self.assertEqual(response.status_code, 405)

    def test_no_access(self):
        # A user with no library-specific role must get 403 from the edit view.
        user, password = self.create_non_staff_user()
        # NOTE(review): this passes the User object, not user.username, to
        # login(); it appears to rely on User.__str__ returning the username -
        # confirm, or switch to user.username for clarity.
        self.client.login(username=user, password=password)
        lib = LibraryFactory.create()
        response = self.client.get(make_url_for_lib(lib.location.library_key))
        self.assertEqual(response.status_code, 403)

    def test_get_component_templates(self):
        """
        Verify that templates for adding discussion and advanced components to
        content libraries are not provided.
        """
        lib = LibraryFactory.create()
        lib.advanced_modules = ['lti']
        lib.save()
        templates = [template['type'] for template in get_component_templates(lib, library=True)]
        self.assertIn('problem', templates)
        self.assertNotIn('discussion', templates)
        self.assertNotIn('advanced', templates)

    def test_manage_library_users(self):
        """
        Simple test that the Library "User Access" view works.
        Also tests that we can use the REST API to assign a user to a library.
        """
        library = LibraryFactory.create()
        extra_user, _ = self.create_non_staff_user()
        manage_users_url = reverse_library_url('manage_library_users', unicode(library.location.library_key))

        response = self.client.get(manage_users_url)
        self.assertEqual(response.status_code, 200)
        # extra_user has not been assigned to the library so should not show up in the list:
        self.assertNotIn(extra_user.username, response.content)

        # Now add extra_user to the library:
        user_details_url = reverse_course_url(
            'course_team_handler',
            library.location.library_key, kwargs={'email': extra_user.email}
        )
        edit_response = self.client.ajax_post(user_details_url, {"role": LibraryUserRole.ROLE})
        self.assertIn(edit_response.status_code, (200, 204))

        # Now extra_user should appear in the list:
        response = self.client.get(manage_users_url)
        self.assertEqual(response.status_code, 200)
        self.assertIn(extra_user.username, response.content)
| agpl-3.0 |
maxrothman/aws-alfred-workflow | venv/lib/python2.7/site-packages/botocore/vendored/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
# Maps each KOI8-R byte value to a "frequency order" used by the prober.
# From the row patterns: 255 appears to mean undefined/control, 254 marks
# 0x0A/0x0D (LF/CR), 253 symbols, 252 digits; low values look like letter
# frequency ranks - presumed from layout, confirm against chardet docs.
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,  # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,  # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237,  # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,  # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1,  # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54,  # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34,  # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70,  # f0
)
# windows-1251 byte -> frequency-order map; same layout conventions as the
# KOI8-R table (255 undefined, 254 CR/LF, 253 symbols, 252 digits).
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
# ISO-8859-5 (latin5) byte -> frequency-order map; same layout conventions
# as the KOI8-R table above.
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# MacCyrillic byte -> frequency-order map; same layout conventions as the
# KOI8-R table above.
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
# IBM855 (DOS Cyrillic) byte -> frequency-order map; same layout conventions
# as the KOI8-R table above.
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
# IBM866 (DOS Cyrillic "alternative") byte -> frequency-order map; same
# layout conventions as the KOI8-R table above.
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
# Flat table of likelihood classes (values 0-3) for consecutive character
# pairs, indexed by the order values produced by the maps above - presumed
# from the data layout; confirm against the chardet SBCS prober docs.
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# Each prober model bundles one charset's char-to-order map with the shared
# RussianLangModel sequence table; only the map and charset name differ.
Koi8rModel = {
    'charToOrderMap': KOI8R_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "KOI8-R"
}

Win1251CyrillicModel = {
    'charToOrderMap': win1251_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}

Latin5CyrillicModel = {
    'charToOrderMap': latin5_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}

# A stray trailing semicolon after this dict was removed for consistency
# with the sibling definitions (it had no effect but is un-Pythonic).
MacCyrillicModel = {
    'charToOrderMap': macCyrillic_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "MacCyrillic"
}

Ibm866Model = {
    'charToOrderMap': IBM866_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "IBM866"
}

Ibm855Model = {
    'charToOrderMap': IBM855_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "IBM855"
}
# flake8: noqa
| mit |
onia/pygobject | gi/overrides/__init__.py | 1 | 3828 | import types
import warnings
import functools
from gi import PyGIDeprecationWarning
from gi._gi import CallableInfo
from gi._gobject.constants import \
TYPE_NONE, \
TYPE_INVALID
# support overrides in different directories than our gi module
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
registry = None
class _Registry(dict):
    """Registry of override classes, keyed as "module.ClassName"."""

    def __setitem__(self, key, value):
        '''We do checks here to make sure only submodules of the override
        module are added.  Key and value should be the same object and come
        from the gi.override module.

        We add the override to the dict as "override_module.name". For instance
        if we were overriding Gtk.Button you would retrieve it as such:
        registry['Gtk.Button']
        '''
        # The override decorator always passes the same object twice; anything
        # else is a direct (unsupported) mutation of the registry.
        if not key == value:
            raise KeyError('You have tried to modify the registry. This should only be done by the override decorator')

        try:
            # Only GI-introspected types carry the __info__ attribute.
            info = getattr(value, '__info__')
        except AttributeError:
            raise TypeError('Can not override a type %s, which is not in a gobject introspection typelib' % value.__name__)

        if not value.__module__.startswith('gi.overrides'):
            raise KeyError('You have tried to modify the registry outside of the overrides module. '
                           'This is not allowed (%s, %s)' % (value, value.__module__))

        g_type = info.get_g_type()
        assert g_type != TYPE_NONE
        if g_type != TYPE_INVALID:
            # Bind the GType to its Python wrapper class.
            g_type.pytype = value

        # strip gi.overrides from module name
        module = value.__module__[13:]
        key = "%s.%s" % (module, value.__name__)
        super(_Registry, self).__setitem__(key, value)

    def register(self, override_class):
        # Store the class under itself; __setitem__ derives the real key.
        self[override_class] = override_class
class overridefunc(object):
    '''Decorator for overriding a gi function.

    Instances are constructed with the gi callable being overridden and,
    when called with the replacement function, publish a wrapper on the
    corresponding introspection module.
    '''
    def __init__(self, func):
        if not isinstance(func, CallableInfo):
            raise TypeError("func must be a gi function, got %s" % func)
        from ..importer import modules
        # Resolve the introspection module the overridden symbol lives on.
        module_name = func.__module__.rsplit('.', 1)[-1]
        self.module = modules[module_name]._introspection_module

    def __call__(self, func):
        # functools.wraps copies __name__, __doc__, __module__, etc. in one
        # step instead of hand-assigning only the first two, so the wrapper
        # introspects like the function it replaces.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        # Publish the override on the introspection module so lookups of the
        # original name resolve to the wrapper.
        setattr(self.module, func.__name__, wrapper)
        return wrapper
# Module-level singleton; populated via the override decorator below.
registry = _Registry()
def override(type_):
    '''Register an override for *type_*.

    Plain Python functions and gi callables are wrapped via overridefunc;
    classes are recorded in the module-level registry and returned unchanged.
    '''
    is_function = isinstance(type_, (types.FunctionType, CallableInfo))
    if is_function:
        return overridefunc(type_)
    registry.register(type_)
    return type_
def deprecated(fn, replacement):
    '''Wrap *fn* so each call warns that it is deprecated in favour of
    *replacement*.'''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        message = '%s is deprecated; use %s instead' % (fn.__name__, replacement)
        warnings.warn(message, PyGIDeprecationWarning, stacklevel=2)
        return fn(*args, **kwargs)
    return wrapped
def strip_boolean_result(method, exc_type=None, exc_str=None, fail_ret=None):
    '''Translate method's return value for stripping off success flag.

    Many gi methods return a "success" boolean followed by out arguments.
    The wrapper returns just the out arguments on success (a single value
    when there is exactly one); on failure it raises exc_type (with exc_str
    if given) when one was supplied, otherwise it returns fail_ret.
    '''
    @functools.wraps(method)
    def wrapped(*args, **kwargs):
        result = method(*args, **kwargs)
        success, out_args = result[0], result[1:]
        if not success:
            if exc_type:
                raise exc_type(exc_str or 'call failed')
            return fail_ret
        return out_args[0] if len(out_args) == 1 else out_args
    return wrapped
| lgpl-2.1 |
Azure/azure-sdk-for-python | sdk/confluent/azure-mgmt-confluent/azure/mgmt/confluent/aio/_configuration.py | 1 | 3228 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ConfluentManagementClientConfiguration(Configuration):
    """Configuration for ConfluentManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Microsoft Azure subscription id.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        # Fail fast on missing required parameters rather than at request time.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(ConfluentManagementClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        # API version is fixed by this generated client; not user-configurable.
        self.api_version = "2021-03-01-preview"
        # Default AAD scope for ARM; callers may override via 'credential_scopes'.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-confluent/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Each pipeline policy can be supplied by the caller through kwargs;
        # otherwise the azure-core / azure-mgmt-core default is constructed.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Build a bearer-token policy from the credential only when the caller
        # did not provide an authentication policy of their own.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit |
GrandHsu/rt-thread | components/external/freetype/src/tools/glnames.py | 360 | 105239 | #!/usr/bin/env python
#
#
# FreeType 2 glyph name builder
#
# Copyright 1996-2000, 2003, 2005, 2007, 2008, 2011 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""\
usage: %s <output-file>
This python script generates the glyph names tables defined in the
`psnames' module.
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
#
# See
#
# http://fonts.apple.com/TTRefMan/RM06/Chap6post.html
#
# for the official list.
#
# 258 entries; the list index is the Macintosh standard glyph id, so
# mac_standard_names[0] == ".notdef".  Data must stay byte-identical to
# the Apple specification referenced above.
mac_standard_names = \
[
  # 0
  ".notdef", ".null", "nonmarkingreturn", "space", "exclam",
  "quotedbl", "numbersign", "dollar", "percent", "ampersand",
  # 10
  "quotesingle", "parenleft", "parenright", "asterisk", "plus",
  "comma", "hyphen", "period", "slash", "zero",
  # 20
  "one", "two", "three", "four", "five",
  "six", "seven", "eight", "nine", "colon",
  # 30
  "semicolon", "less", "equal", "greater", "question",
  "at", "A", "B", "C", "D",
  # 40
  "E", "F", "G", "H", "I",
  "J", "K", "L", "M", "N",
  # 50
  "O", "P", "Q", "R", "S",
  "T", "U", "V", "W", "X",
  # 60
  "Y", "Z", "bracketleft", "backslash", "bracketright",
  "asciicircum", "underscore", "grave", "a", "b",
  # 70
  "c", "d", "e", "f", "g",
  "h", "i", "j", "k", "l",
  # 80
  "m", "n", "o", "p", "q",
  "r", "s", "t", "u", "v",
  # 90
  "w", "x", "y", "z", "braceleft",
  "bar", "braceright", "asciitilde", "Adieresis", "Aring",
  # 100
  "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
  "aacute", "agrave", "acircumflex", "adieresis", "atilde",
  # 110
  "aring", "ccedilla", "eacute", "egrave", "ecircumflex",
  "edieresis", "iacute", "igrave", "icircumflex", "idieresis",
  # 120
  "ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
  "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
  # 130
  "dagger", "degree", "cent", "sterling", "section",
  "bullet", "paragraph", "germandbls", "registered", "copyright",
  # 140
  "trademark", "acute", "dieresis", "notequal", "AE",
  "Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
  # 150
  "yen", "mu", "partialdiff", "summation", "product",
  "pi", "integral", "ordfeminine", "ordmasculine", "Omega",
  # 160
  "ae", "oslash", "questiondown", "exclamdown", "logicalnot",
  "radical", "florin", "approxequal", "Delta", "guillemotleft",
  # 170
  "guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
  "Otilde", "OE", "oe", "endash", "emdash",
  # 180
  "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
  "lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
  # 190
  "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
  "periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
  "Acircumflex",
  # 200
  "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
  "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
  # 210
  "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
  "dotlessi", "circumflex", "tilde", "macron", "breve",
  # 220
  "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
  "caron", "Lslash", "lslash", "Scaron", "scaron",
  # 230
  "Zcaron", "zcaron", "brokenbar", "Eth", "eth",
  "Yacute", "yacute", "Thorn", "thorn", "minus",
  # 240
  "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
  "onequarter", "threequarters", "franc", "Gbreve", "gbreve",
  # 250
  "Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
  "Ccaron", "ccaron", "dcroat"
]
# The list of standard `SID' glyph names. For the official list,
# see Annex A of document at
#
# http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf .
#
# 391 entries; the list index is the CFF standard string id (SID), so
# sid_standard_names[0] == ".notdef".  The T1 encoding tables below index
# into this list.  Data must stay byte-identical to Adobe's CFF Annex A.
sid_standard_names = \
[
  # 0
  ".notdef", "space", "exclam", "quotedbl", "numbersign",
  "dollar", "percent", "ampersand", "quoteright", "parenleft",
  # 10
  "parenright", "asterisk", "plus", "comma", "hyphen",
  "period", "slash", "zero", "one", "two",
  # 20
  "three", "four", "five", "six", "seven",
  "eight", "nine", "colon", "semicolon", "less",
  # 30
  "equal", "greater", "question", "at", "A",
  "B", "C", "D", "E", "F",
  # 40
  "G", "H", "I", "J", "K",
  "L", "M", "N", "O", "P",
  # 50
  "Q", "R", "S", "T", "U",
  "V", "W", "X", "Y", "Z",
  # 60
  "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
  "quoteleft", "a", "b", "c", "d",
  # 70
  "e", "f", "g", "h", "i",
  "j", "k", "l", "m", "n",
  # 80
  "o", "p", "q", "r", "s",
  "t", "u", "v", "w", "x",
  # 90
  "y", "z", "braceleft", "bar", "braceright",
  "asciitilde", "exclamdown", "cent", "sterling", "fraction",
  # 100
  "yen", "florin", "section", "currency", "quotesingle",
  "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
  # 110
  "fl", "endash", "dagger", "daggerdbl", "periodcentered",
  "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
  # 120
  "guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
  "acute", "circumflex", "tilde", "macron", "breve",
  # 130
  "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
  "ogonek", "caron", "emdash", "AE", "ordfeminine",
  # 140
  "Lslash", "Oslash", "OE", "ordmasculine", "ae",
  "dotlessi", "lslash", "oslash", "oe", "germandbls",
  # 150
  "onesuperior", "logicalnot", "mu", "trademark", "Eth",
  "onehalf", "plusminus", "Thorn", "onequarter", "divide",
  # 160
  "brokenbar", "degree", "thorn", "threequarters", "twosuperior",
  "registered", "minus", "eth", "multiply", "threesuperior",
  # 170
  "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
  "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
  # 180
  "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
  "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
  # 190
  "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
  "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
  # 200
  "aacute", "acircumflex", "adieresis", "agrave", "aring",
  "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
  # 210
  "egrave", "iacute", "icircumflex", "idieresis", "igrave",
  "ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
  # 220
  "otilde", "scaron", "uacute", "ucircumflex", "udieresis",
  "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
  # 230
  "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
  "Acutesmall",
  "parenleftsuperior", "parenrightsuperior", "twodotenleader",
  "onedotenleader", "zerooldstyle",
  # 240
  "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
  "fiveoldstyle",
  "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
  "commasuperior",
  # 250
  "threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
  "bsuperior",
  "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
  # 260
  "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
  "tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
  # 270
  "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
  "Asmall",
  "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
  # 280
  "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
  "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
  # 290
  "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
  "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
  # 300
  "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
  "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
  "Dieresissmall",
  # 310
  "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
  "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
  "questiondownsmall",
  # 320
  "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
  "twothirds", "zerosuperior", "foursuperior", "fivesuperior",
  "sixsuperior",
  # 330
  "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
  "oneinferior",
  "twoinferior", "threeinferior", "fourinferior", "fiveinferior",
  "sixinferior",
  # 340
  "seveninferior", "eightinferior", "nineinferior", "centinferior",
  "dollarinferior",
  "periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
  "Acircumflexsmall",
  # 350
  "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
  "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
  "Igravesmall",
  # 360
  "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
  "Ntildesmall",
  "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
  "Odieresissmall",
  # 370
  "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
  "Ucircumflexsmall",
  "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
  "001.000",
  # 380
  "001.001", "001.002", "001.003", "Black", "Bold",
  "Book", "Light", "Medium", "Regular", "Roman",
  # 390
  "Semibold"
]
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
# 256 entries, one per character code 0-255.  Each value is an index into
# sid_standard_names; 0 maps the code to ".notdef" (unencoded).
t1_standard_encoding = \
[
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   1,   2,   3,   4,   5,   6,   7,   8,
    9,  10,  11,  12,  13,  14,  15,  16,  17,  18,
   19,  20,  21,  22,  23,  24,  25,  26,  27,  28,
   29,  30,  31,  32,  33,  34,  35,  36,  37,  38,
   39,  40,  41,  42,  43,  44,  45,  46,  47,  48,
   49,  50,  51,  52,  53,  54,  55,  56,  57,  58,
   59,  60,  61,  62,  63,  64,  65,  66,  67,  68,
   69,  70,  71,  72,  73,  74,  75,  76,  77,  78,
   79,  80,  81,  82,  83,  84,  85,  86,  87,  88,
   89,  90,  91,  92,  93,  94,  95,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,  96,  97,  98,  99, 100, 101, 102, 103, 104,
  105, 106, 107, 108, 109, 110,   0, 111, 112, 113,
  114,   0, 115, 116, 117, 118, 119, 120, 121, 122,
    0, 123,   0, 124, 125, 126, 127, 128, 129, 130,
  131,   0, 132, 133,   0, 134, 135, 136, 137,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0, 138,   0, 139,   0,   0,
    0,   0, 140, 141, 142, 143,   0,   0,   0,   0,
    0, 144,   0,   0,   0, 145,   0,   0, 146, 147,
  148, 149,   0,   0,   0,   0
]
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
# 256 entries, one per character code 0-255.  Each value is an index into
# sid_standard_names; 0 maps the code to ".notdef" (unencoded).
t1_expert_encoding = \
[
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   1, 229, 230,   0, 231, 232, 233, 234,
  235, 236, 237, 238,  13,  14,  15,  99, 239, 240,
  241, 242, 243, 244, 245, 246, 247, 248,  27,  28,
  249, 250, 251, 252,   0, 253, 254, 255, 256, 257,
    0,   0,   0, 258,   0,   0, 259, 260, 261, 262,
    0,   0, 263, 264, 265,   0, 266, 109, 110, 267,
  268, 269,   0, 270, 271, 272, 273, 274, 275, 276,
  277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
  287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
  297, 298, 299, 300, 301, 302, 303,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
    0, 304, 305, 306,   0,   0, 307, 308, 309, 310,
  311,   0, 312,   0,   0, 313,   0,   0, 314, 315,
    0,   0, 316, 317, 318,   0,   0,   0, 158, 155,
  163, 319, 320, 321, 322, 323, 324, 325,   0,   0,
  326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
  333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
  343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
  353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
  363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
  373, 374, 375, 376, 377, 378
]
# This data has been taken literally from the files `glyphlist.txt'
# and `zapfdingbats.txt' version 2.0, Sept 2002. It is available from
#
# http://sourceforge.net/adobe/aglfn/
#
adobe_glyph_list = """\
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
a100;275E
a101;2761
a102;2762
a103;2763
a104;2764
a105;2710
a106;2765
a107;2766
a108;2767
a109;2660
a10;2721
a110;2665
a111;2666
a112;2663
a117;2709
a118;2708
a119;2707
a11;261B
a120;2460
a121;2461
a122;2462
a123;2463
a124;2464
a125;2465
a126;2466
a127;2467
a128;2468
a129;2469
a12;261E
a130;2776
a131;2777
a132;2778
a133;2779
a134;277A
a135;277B
a136;277C
a137;277D
a138;277E
a139;277F
a13;270C
a140;2780
a141;2781
a142;2782
a143;2783
a144;2784
a145;2785
a146;2786
a147;2787
a148;2788
a149;2789
a14;270D
a150;278A
a151;278B
a152;278C
a153;278D
a154;278E
a155;278F
a156;2790
a157;2791
a158;2792
a159;2793
a15;270E
a160;2794
a161;2192
a162;27A3
a163;2194
a164;2195
a165;2799
a166;279B
a167;279C
a168;279D
a169;279E
a16;270F
a170;279F
a171;27A0
a172;27A1
a173;27A2
a174;27A4
a175;27A5
a176;27A6
a177;27A7
a178;27A8
a179;27A9
a17;2711
a180;27AB
a181;27AD
a182;27AF
a183;27B2
a184;27B3
a185;27B5
a186;27B8
a187;27BA
a188;27BB
a189;27BC
a18;2712
a190;27BD
a191;27BE
a192;279A
a193;27AA
a194;27B6
a195;27B9
a196;2798
a197;27B4
a198;27B7
a199;27AC
a19;2713
a1;2701
a200;27AE
a201;27B1
a202;2703
a203;2750
a204;2752
a205;276E
a206;2770
a20;2714
a21;2715
a22;2716
a23;2717
a24;2718
a25;2719
a26;271A
a27;271B
a28;271C
a29;2722
a2;2702
a30;2723
a31;2724
a32;2725
a33;2726
a34;2727
a35;2605
a36;2729
a37;272A
a38;272B
a39;272C
a3;2704
a40;272D
a41;272E
a42;272F
a43;2730
a44;2731
a45;2732
a46;2733
a47;2734
a48;2735
a49;2736
a4;260E
a50;2737
a51;2738
a52;2739
a53;273A
a54;273B
a55;273C
a56;273D
a57;273E
a58;273F
a59;2740
a5;2706
a60;2741
a61;2742
a62;2743
a63;2744
a64;2745
a65;2746
a66;2747
a67;2748
a68;2749
a69;274A
a6;271D
a70;274B
a71;25CF
a72;274D
a73;25A0
a74;274F
a75;2751
a76;25B2
a77;25BC
a78;25C6
a79;2756
a7;271E
a81;25D7
a82;2758
a83;2759
a84;275A
a85;276F
a86;2771
a87;2772
a88;2773
a89;2768
a8;271F
a90;2769
a91;276C
a92;276D
a93;276A
a94;276B
a95;2774
a96;2775
a97;275B
a98;275C
a99;275D
a9;2720
"""
# string table management
#
class StringTable:
    """A string table for C source emission.

    Concatenates a list of glyph names into one C `char' array (each
    name NUL-terminated) and records the byte offset of every name so
    that sub-lists can later be dumped as offset tables.
    """

    def __init__( self, name_list, master_table_name ):
        self.names = name_list
        self.master_table = master_table_name
        self.indices = {}
        index = 0

        # record the offset of each name within the master table;
        # the `+ 1' accounts for the terminating NUL of each name
        for name in name_list:
            self.indices[name] = index
            index += len( name ) + 1

        self.total = index

    def dump( self, file ):
        """Dump the master table as a C `char' array."""
        write = file.write
        write( " static const char " + self.master_table +
               "[" + repr( self.total ) + "] =\n" )
        write( " {\n" )

        # emit one source line per name instead of accumulating the whole
        # table in a single string first (the old `line +=' loop was
        # quadratic); `"','".join( name )' replaces the non-portable
        # `string.join( re.findall( ".", name ), "','" )' and produces
        # identical output for newline-free names
        for name in self.names:
            write( " '" + "','".join( name ) + "', 0,\n" )

        write( " };\n\n\n" )

    def dump_sublist( self, file, table_name, macro_name, sublist ):
        """Dump a sub-list of names as a C array of offsets into the master table."""
        write = file.write
        write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )

        write( " /* Values are offsets into the `" +
               self.master_table + "' table */\n\n" )
        write( " static const short " + table_name +
               "[" + macro_name + "] =\n" )
        write( " {\n" )

        line = " "
        comma = ""
        col = 0

        # 14 offsets per output line
        for name in sublist:
            line += comma
            line += "%4d" % self.indices[name]
            col += 1
            comma = ","
            if col == 14:
                col = 0
                comma = ",\n "

        write( line + "\n };\n\n\n" )
# We now store the Adobe Glyph List in compressed form. The list is put
# into a data structure called `trie' (because it has a tree-like
# appearance). Consider, for example, that you want to store the
# following name mapping:
#
# A => 1
# Aacute => 6
# Abalon => 2
# Abstract => 4
#
# It is possible to store the entries as follows.
#
# A => 1
# |
# +-acute => 6
# |
# +-b
# |
# +-alon => 2
# |
# +-stract => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
# root = StringNode( "", 0 )
# for word in map.keys():
# root.add( word, map[word] )
#
# which creates a large trie where each node has only one child.
#
# Executing
#
# root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme. We
# use the fact that in the AGL no name contains character codes > 127.
#
# name bitsize description
# ----------------------------------------------------------------
# notlast 1 Set to 1 if this is not the last letter
# in the word.
# ascii 7 The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
# current key (if any). Again we can do some optimization because all
# AGL entries are from the BMP; this means that 16 bits are sufficient
# to store its Unicode values. Additionally, no node has more than
# 127 children.
#
# name bitsize description
# -----------------------------------------
# hasvalue 1 Set to 1 if a 16-bit Unicode value follows.
# num_children 7 Number of children. Can be 0 only if
# `hasvalue' is set to 1.
# value 16 Optional Unicode value.
#
# - A node is finished by a list of 16bit absolute offsets to the
# children, which must be sorted in increasing order of their first
# letter.
#
# For simplicity, all 16bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
class StringNode:
    """A node of the compressed glyph-name trie described in the comment
    block above.

    Each node holds a run of `letters', an optional Unicode value
    (0 means "no value"), and a dictionary of child nodes keyed by
    their first letter.
    """

    def __init__( self, letter, value ):
        # `letter' is a string of one or more characters (nodes start
        # with a single letter; runs are merged by `optimize'); `value'
        # is the Unicode value of the name ending here, or 0
        self.letter = letter
        self.value = value
        self.children = {}

    def __cmp__( self, other ):
        # Python 2 comparison hook: order nodes by their first letter so
        # that `children.sort()' yields the child order required by the
        # binary format (increasing first letter)
        return ord( self.letter[0] ) - ord( other.letter[0] )

    def add( self, word, value ):
        # insert `word' into the trie, one letter per node; the node at
        # which the word ends receives `value'
        if len( word ) == 0:
            self.value = value
            return

        letter = word[0]
        word = word[1:]

        if self.children.has_key( letter ):  # Python 2 dict idiom
            child = self.children[letter]
        else:
            child = StringNode( letter, 0 )
            self.children[letter] = child

        child.add( word, value )

    def optimize( self ):
        # optimize all children first
        children = self.children.values()
        self.children = {}

        for child in children:
            self.children[child.letter[0]] = child.optimize()

        # don't optimize if there's a value,
        # if we don't have any child or if we
        # have more than one child
        if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
            return self

        # merge this node with its single child: concatenate the letter
        # runs and take over the child's value and children
        child = children[0]

        self.letter += child.letter
        self.value = child.value
        self.children = child.children

        return self

    def dump_debug( self, write, margin ):
        # this is used during debugging
        line = margin + "+-"
        if len( self.letter ) == 0:
            line += "<NOLETTER>"
        else:
            line += self.letter

        if self.value:
            line += " => " + repr( self.value )

        write( line + "\n" )

        if self.children:
            margin += "| "
            for child in self.children.values():
                child.dump_debug( write, margin )

    def locate( self, index ):
        # compute this node's byte offset in the serialised trie and
        # recursively assign offsets to all children; returns the first
        # free offset after this subtree
        self.index = index

        if len( self.letter ) > 0:
            index += len( self.letter ) + 1  # letter bytes + count byte
        else:
            index += 2                       # single 0 letter byte + count byte

        if self.value != 0:
            index += 2                       # 16-bit Unicode value

        children = self.children.values()
        children.sort()                      # sorted via __cmp__ (first letter)

        index += 2 * len( children )         # 16-bit absolute child offsets

        for child in children:
            index = child.locate( index )

        return index

    def store( self, storage ):
        # append this node's binary representation to `storage' (a byte
        # string under Python 2) and recurse into the sorted children;
        # the layout must match the offsets computed by `locate'
        # write the letters
        l = len( self.letter )
        if l == 0:
            storage += struct.pack( "B", 0 )
        else:
            for n in range( l ):
                val = ord( self.letter[n] )
                if n < l - 1:
                    val += 128               # `notlast' flag: more letters follow
                storage += struct.pack( "B", val )

        # write the count
        children = self.children.values()
        children.sort()

        count = len( children )

        if self.value != 0:
            # high bit = `hasvalue' flag, low 7 bits = child count,
            # followed by the 16-bit big-endian Unicode value
            storage += struct.pack( "!BH", count + 128, self.value )
        else:
            storage += struct.pack( "B", count )

        # 16-bit big-endian absolute offsets of the children, in
        # increasing order of their first letter
        for child in children:
            storage += struct.pack( "!H", child.index )

        for child in children:
            storage = child.store( storage )

        return storage
def adobe_glyph_values():
    """Return ([glyph names], [unicode values]) parsed from `adobe_glyph_list'.

    Each line of the list has the form `name;VALUE'.  Entries whose
    value field contains more than one code point (space-separated
    sequences) are skipped -- only single-codepoint mappings are kept.
    """
    glyphs = []
    values = []

    # str.split replaces the deprecated `string.split' module function
    # (removed in Python 3); behaviour is identical
    for line in adobe_glyph_list.split( '\n' ):
        if line:
            fields = line.split( ';' )
            # print fields[1] + ' - ' + fields[0]

            # a space in the value field means a multi-codepoint mapping
            subfields = fields[1].split( ' ' )
            if len( subfields ) == 1:
                glyphs.append( fields[0] )
                values.append( fields[1] )

    return glyphs, values
def filter_glyph_names( alist, filter ):
    """filter `alist' by taking _out_ all glyph names that are in `filter'

    `filter' keeps its builtin-shadowing name for backward compatibility
    with keyword callers.  Returns the surviving names in original order.
    """
    # A plain membership test replaces the original bare `except:' around
    # list.index(), which silently swallowed *every* exception type, and
    # the unused `count' variable is gone.
    return [name for name in alist if name not in filter]
def dump_encoding( file, encoding_name, encoding_list ):
    """Write `encoding_list' as a C `unsigned short' array named
    `encoding_name' to `file', 16 values per output row."""
    write = file.write
    write( " /* the following are indices into the SID name table */\n" )
    write( " static const unsigned short " + encoding_name +
           "[" + repr( len( encoding_list ) ) + "] =\n" )
    write( " {\n" )
    # Build the value rows in one pass: a plain comma between values,
    # a comma + newline + indent every 16th value.
    pieces = []
    for position, value in enumerate( encoding_list ):
        if position > 0:
            pieces.append( ",\n " if position % 16 == 0 else "," )
        pieces.append( "%3d" % value )
    write( " " + "".join( pieces ) + "\n };\n\n\n" )
def dump_array( the_array, write, array_name ):
    """Write `the_array' (a string of bytes) as a C `unsigned char' array
    named `array_name', 16 values per row, flushing in ~1KB chunks."""
    write( " static const unsigned char " + array_name +
           "[" + repr( len( the_array ) ) + "L] =\n" )
    write( " {\n" )
    pending = ""
    separator = " "          # first value is preceded by a single space
    column = 0
    for byte in the_array:
        pending += separator + "%3d" % ord( byte )
        separator = ","
        column += 1
        if column == 16:
            # start a new output row after 16 values
            column = 0
            separator = ",\n "
        if len( pending ) > 1024:
            # flush periodically so the buffer stays small
            write( pending )
            pending = ""
    write( pending + "\n };\n\n\n" )
def main():
    """main program body

    Generates a C source file (path given as sys.argv[1]) containing the
    standard glyph-name tables, the Type 1 encodings and the compressed
    Adobe Glyph List, plus the C lookup routine for the latter.
    """
    if len( sys.argv ) != 2:
        # NOTE(review): Python 2 print statement -- this script predates
        # Python 3 and cannot run unmodified under it.
        print __doc__ % sys.argv[0]
        sys.exit( 1 )
    # NOTE(review): mode string "w\n" looks like a typo for "w"; CPython 2's
    # open() tolerated trailing junk in the mode string -- confirm before
    # changing.
    file = open( sys.argv[1], "w\n" )
    write = file.write
    count_sid = len( sid_standard_names )
    # `mac_extras' contains the list of glyph names in the Macintosh standard
    # encoding which are not in the SID Standard Names.
    #
    mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
    # `base_list' contains the names of our final glyph names table.
    # It consists of the `mac_extras' glyph names, followed by the SID
    # standard names.
    #
    mac_extras_count = len( mac_extras )
    base_list = mac_extras + sid_standard_names
    # emit the FreeType source header banner
    write( "/***************************************************************************/\n" )
    write( "/* */\n" )
    write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
    write( "/* */\n" )
    write( "/* PostScript glyph names. */\n" )
    write( "/* */\n" )
    write( "/* Copyright 2005, 2008, 2011 by */\n" )
    write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
    write( "/* */\n" )
    write( "/* This file is part of the FreeType project, and may only be used, */\n" )
    write( "/* modified, and distributed under the terms of the FreeType project */\n" )
    write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
    write( "/* this file you indicate that you have read the license and */\n" )
    write( "/* understand and accept it fully. */\n" )
    write( "/* */\n" )
    write( "/***************************************************************************/\n" )
    write( "\n" )
    write( "\n" )
    write( " /* This file has been generated automatically -- do not edit! */\n" )
    write( "\n" )
    write( "\n" )
    # dump final glyph list (mac extras + sid standard names)
    #
    st = StringTable( base_list, "ft_standard_glyph_names" )
    st.dump( file )
    st.dump_sublist( file, "ft_mac_names",
                     "FT_NUM_MAC_NAMES", mac_standard_names )
    st.dump_sublist( file, "ft_sid_names",
                     "FT_NUM_SID_NAMES", sid_standard_names )
    dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
    dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
    # dump the AGL in its compressed form
    #
    agl_glyphs, agl_values = adobe_glyph_values()
    # NOTE(review): `dict' shadows the builtin; kept as-is for fidelity.
    dict = StringNode( "", 0 )
    for g in range( len( agl_glyphs ) ):
        # agl_values holds hex strings without the 0x prefix
        dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
    dict = dict.optimize()
    dict_len = dict.locate( 0 )
    dict_array = dict.store( "" )
    write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
    dump_array( dict_array, write, "ft_adobe_glyph_list" )
    # write the lookup routine now
    #
    write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
    # The C self-test below is compiled out by default.
    if 0: # generate unit test, or don't
        #
        # now write the unit test to check that everything works OK
        #
        write( "#ifdef TEST\n\n" )
        write( "static const char* const the_names[] = {\n" )
        for name in agl_glyphs:
            write( ' "' + name + '",\n' )
        write( " 0\n};\n" )
        write( "static const unsigned long the_values[] = {\n" )
        for val in agl_values:
            write( ' 0x' + val + ',\n' )
        write( " 0\n};\n" )
        write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
        write( "#endif /* TEST */\n" )
    write("\n/* END */\n")
# Now run the main routine
#
main()  # runs unconditionally -- this script has no __main__ guard
# END
| gpl-2.0 |
linovia/fab-bundle | fab_bundle/templates/backup_dbs.py | 2 | 1429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import subprocess
KEEP = 7 # number of daily backup directories to keep
# '{{ user }}' is presumably a template placeholder substituted at deploy
# time (file lives under templates/) -- confirm with the deployment tooling.
BACKUP_DIR = '/home/{{ user }}/dbs'
# System/template databases that must never be dumped.
IGNORE_DBS = (
    'template0',
    'template1',
    'template_postgis',
    'postgres',
)
def run(command):
    """Run `command' through the shell and return its captured stdout.

    NOTE(review): stderr is not redirected here, so Popen.communicate()
    always returns None for it; the original "return stderr on failure"
    branch was dead code and has been removed.  Behaviour is unchanged:
    stdout is always returned (bytes under Python 3).
    """
    result = subprocess.Popen(command, shell=True,
                              stdout=subprocess.PIPE)
    out, _ = result.communicate()
    return out
def dbs():
    """Dump every non-system PostgreSQL database into a dated backup
    directory, then prune backup directories beyond the newest KEEP."""
    stamp = (datetime.datetime.today() -
             datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    backup_dir = '%s/%s' % (BACKUP_DIR, stamp)
    run('mkdir -p %s' % backup_dir)
    # Parse `psql -l` output: skip the header (3 lines) and footer (3
    # lines); continuation rows start with a space after the first column.
    listing = run('psql -U postgres -l')
    for row in listing.split('\n')[3:-3]:
        row = row[1:]
        if row.startswith(' '):
            continue
        db_name = row.split()[0]
        if db_name in IGNORE_DBS:
            continue
        run('pg_dump -U postgres %s | gzip > %s/%s-%s.sql.gz'
            % (db_name, backup_dir, db_name, stamp))
    # Drop everything except the newest KEEP dated directories.
    for stale in run('ls %s' % BACKUP_DIR).strip().split('\n')[:-KEEP]:
        run('rm -rf %s/%s' % (BACKUP_DIR, stale))
# Entry point: back up all databases when executed as a script.
if __name__ == '__main__':
    dbs()
| bsd-3-clause |
diagonalwalnut/Experience | oauth_provider/tests/protocol.py | 3 | 17710 | import time
import cgi
import oauth2 as oauth
from django.test import Client
from oauth_provider.tests.auth import BaseOAuthTestCase
from oauth_provider.models import Token, Consumer, Scope
from oauth_provider.compat import get_user_model
User = get_user_model()
class ProtocolExample(BaseOAuthTestCase):
    """Set of tests, based on ProtocolExample document
    """
    # NOTE(review): Python 2 era test code -- `cgi.parse_qs` is deprecated
    # (urlparse.parse_qs is the modern spelling) and response.content is
    # treated as str throughout.

    def _last_created_request_token(self):
        # Most recently created REQUEST token.  Assumes the default
        # queryset ordering puts the newest token last -- TODO confirm.
        return list(Token.objects.filter(token_type=Token.REQUEST))[-1]

    def _last_created_access_token(self):
        # Same assumption as above, for ACCESS tokens.
        return list(Token.objects.filter(token_type=Token.ACCESS))[-1]

    def _update_token_from_db(self, request_token):
        """Get fresh copy of the token from the DB"""
        return Token.objects.get(key=request_token.key)

    def _make_request_token_parameters(self):
        # Query parameters for the initial request-token call
        # (PLAINTEXT signature: consumer secret + '&', no token secret).
        return {
            'oauth_consumer_key': self.CONSUMER_KEY,
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_signature': '%s&' % self.CONSUMER_SECRET,
            'oauth_timestamp': str(int(time.time())),
            'oauth_nonce': 'requestnonce',
            'oauth_version': '1.0',
            'oauth_callback': 'http://printer.example.com/request_token_ready',
            'scope': 'photos', # custom argument to specify Protected Resource
        }

    def _make_access_token_parameters(self, token):
        # Parameters to exchange an authorized request token for an
        # access token (PLAINTEXT: consumer secret + '&' + token secret).
        return {
            'oauth_consumer_key': self.CONSUMER_KEY,
            'oauth_token': token.key,
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_signature': '%s&%s' % (self.CONSUMER_SECRET, token.secret),
            'oauth_timestamp': str(int(time.time())),
            'oauth_nonce': 'accessnonce',
            'oauth_version': '1.0',
            'oauth_verifier': token.verifier,
            'scope': 'photos',
        }

    def _make_protected_access_parameters(self, access_token):
        # Parameters for hitting a protected resource; the HMAC-SHA1
        # oauth_signature itself is added by the individual tests.
        return {
            'oauth_consumer_key': self.CONSUMER_KEY,
            'oauth_token': access_token.key,
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_timestamp': str(int(time.time())),
            'oauth_nonce': 'accessresourcenonce',
            'oauth_version': '1.0',
        }

    def test_returns_invalid_params_empty_request(self):
        """Printer website tries to access the photo and receives
        HTTP 401 Unauthorized indicating it is private.
        The Service Provider includes the following header with the response:
        """
        response = self.c.get("/oauth/request_token/")
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response._headers['www-authenticate'], ('WWW-Authenticate', 'OAuth realm=""'))
        self.assertEqual(response.content, 'Invalid request parameters.')

    def test_returns_401_wrong_callback(self):
        #If you try to put a wrong callback, it will return an error
        parameters = self._make_request_token_parameters()
        parameters['oauth_callback'] = 'wrongcallback'
        parameters['oauth_nonce'] = 'requestnoncewrongcallback'
        response = self.c.get("/oauth/request_token/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Invalid callback URL.')

    def test_401_for_wrong_scope(self):
        # If you try to access a resource with a wrong scope, it will return an error
        parameters = self._make_request_token_parameters()
        parameters['scope'] = 'videos'
        parameters['oauth_nonce'] = 'requestnoncevideos'
        response = self.c.get("/oauth/request_token/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Scope does not exist.')

    def test_oob_callback(self):
        # If you do not provide any callback (i.e. oob), the Service Provider SHOULD display the value of the verification code
        parameters = self._make_request_token_parameters()
        parameters['oauth_callback'] = 'oob'
        parameters['oauth_nonce'] = 'requestnonceoob'
        response = self.c.get("/oauth/request_token/", parameters)
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        oob_token = self._last_created_request_token()
        self.assertTrue(oob_token.key in response_params['oauth_token'])
        self.assertTrue(oob_token.secret in response_params['oauth_token_secret'])
        # out-of-band tokens never get a confirmed callback
        self.assertFalse(oob_token.callback_confirmed)
        self.assertIsNone(oob_token.callback)

    def _validate_request_token_response(self, response):
        # Response body must echo the newly minted token's key/secret and
        # confirm the callback, per OAuth 1.0a.
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        last_token = self._last_created_request_token()
        self.assertTrue(last_token.key in response_params['oauth_token'])
        self.assertTrue(last_token.secret in response_params['oauth_token_secret'])
        self.assertTrue(response_params['oauth_callback_confirmed'])

    def _obtain_request_token(self):
        # Perform the request-token call and return the stored token.
        parameters = self._make_request_token_parameters()
        response = self.c.get("/oauth/request_token/", parameters)
        # The Service Provider checks the signature and replies with an unauthorized Request Token in the body of the HTTP response
        self._validate_request_token_response(response)
        return self._last_created_request_token()

    def test_obtain_request_token(self):
        self._obtain_request_token()

    def test_provider_redirects_to_login_page(self):
        """The Service Provider asks Jane to sign-in using her username and password
        """
        token = self._obtain_request_token()
        parameters = {
            'oauth_token': token.key,
        }
        # anonymous: redirected to login, carrying the token key
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(token.key in response['Location'])
        # authenticated: the authorize page renders
        self.c.login(username='jane', password='toto')
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 200)

    def test_authorize_without_session_parameter(self):
        # Then consumer obtains a Request Token
        token = self._obtain_request_token()
        parameters = {'oauth_token': token.key}
        self.c.login(username='jane', password='toto')
        parameters['authorize_access'] = True
        response = self.c.post("/oauth/authorize/", parameters)
        # without session parameter (previous POST removed it)
        response = self.c.post("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Action not allowed.')

    def test_access_not_granted_by_the_user(self):
        token = self._obtain_request_token()
        parameters = {'oauth_token': token.key}
        self.c.login(username='jane', password='toto')
        self.c.get("/oauth/authorize/", parameters) # set session id
        parameters['authorize_access'] = False
        response = self.c.post("/oauth/authorize/", parameters)
        self.assertTrue('error=Access+not+granted+by+user' in response['Location'])

    def _request_authorization(self, request_token):
        """Request authorization for the request token.
        """
        self.assertFalse(request_token.is_approved)
        parameters = {'oauth_token': request_token.key}
        self.c.login(username='jane', password='toto')
        # GET first to establish the session, then POST the approval
        self.c.get("/oauth/authorize/", parameters)
        parameters['authorize_access'] = 1
        self.c.post("/oauth/authorize/", parameters)
        request_token = self._update_token_from_db(request_token)
        self.assertTrue(request_token.is_approved)

    def test_request_authorization(self):
        token = self._obtain_request_token()
        self._request_authorization(token)

    def _obtain_access_token(self, request_token):
        # Exchange the authorized request token for an access token and
        # verify the response echoes the stored token.
        parameters = self._make_access_token_parameters(request_token)
        response = self.c.get("/oauth/access_token/", parameters)
        response_params = cgi.parse_qs(response.content)
        access_token = self._last_created_access_token()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response_params['oauth_token'][0], access_token.key)
        self.assertEqual(response_params['oauth_token_secret'][0], access_token.secret)
        self.assertEqual(access_token.user.username, 'jane')
        return access_token

    def test_request_another_access_token(self):
        """The Consumer will not be able to request another Access Token
        with the same parameters because the Request Token has been deleted
        once Access Token is created
        """
        request_token = self._obtain_request_token()
        self._request_authorization(request_token)
        request_token = self._update_token_from_db(request_token)
        self._obtain_access_token(request_token)
        parameters = self._make_access_token_parameters(request_token)
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, 'Invalid request token.')

    def test_request_access_token_invalid_verifier(self):
        """The Consumer will not be able to request another Access Token
        with a missing or invalid verifier
        """
        jane = User.objects.get(username='jane')
        # Create an approved token directly in the DB so only the
        # verifier check can fail.
        new_request_token = Token.objects.create_token(
            token_type=Token.REQUEST,
            timestamp=str(int(time.time())),
            consumer=Consumer.objects.get(key=self.CONSUMER_KEY),
            user=jane,
            scope=Scope.objects.get(name='photos'))
        new_request_token.is_approved = True
        new_request_token.save()
        parameters = self._make_access_token_parameters(new_request_token)
        parameters['oauth_token'] = new_request_token.key
        parameters['oauth_signature'] = '%s&%s' % (self.CONSUMER_SECRET, new_request_token.secret)
        parameters['oauth_verifier'] = 'invalidverifier'
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, 'Invalid OAuth verifier.')

    def test_request_access_token_not_approved_request_token(self):
        """The Consumer will not be able to request an Access Token if the token is not approved
        """
        jane = User.objects.get(username='jane')
        new_request_token = Token.objects.create_token(
            token_type=Token.REQUEST,
            timestamp=str(int(time.time())),
            consumer=Consumer.objects.get(key=self.CONSUMER_KEY),
            user=jane,
            scope=Scope.objects.get(name='photos'))
        new_request_token.is_approved = False
        new_request_token.save()
        parameters = self._make_access_token_parameters(new_request_token)
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, 'Request Token not approved by the user.')

    def test_error_accessing_protected_resource(self):
        # Full happy path up to an access token, then a bad signature and
        # a bare request must both be rejected.
        request_token = self._obtain_request_token()
        self._request_authorization(request_token)
        request_token = self._update_token_from_db(request_token)
        access_token = self._obtain_access_token(request_token)
        parameters = self._make_protected_access_parameters(access_token)
        parameters['oauth_signature'] = 'wrongsignature'
        parameters['oauth_nonce'] = 'anotheraccessresourcenonce'
        response = self.c.get("/oauth/photo/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertTrue(response.content.startswith('Could not verify OAuth request.'))
        response = self.c.get("/oauth/photo/")
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Invalid request parameters.')

    def test_positive(self):
        # End-to-end walkthrough of the whole OAuth 1.0a dance.
        # Then consumer obtains a Request Token
        parameters = self._make_request_token_parameters()
        response = self.c.get("/oauth/request_token/", parameters)
        # The Service Provider checks the signature and replies with an unauthorized Request Token in the body of the HTTP response
        self._validate_request_token_response(response)
        token = self._last_created_request_token()
        parameters = {'oauth_token': token.key}
        """The Consumer redirects Jane's browser to the Service Provider User Authorization URL
        to obtain Jane's approval for accessing her private photos.
        """
        response = self.c.get("/oauth/authorize/", parameters)
        """The Service Provider asks Jane to sign-in using her username and password
        """
        self.assertEqual(response.status_code, 302)
        expected_redirect = 'http://testserver/accounts/login/?next=/oauth/authorize/%3Foauth_token%3D{0}'.format(token.key)
        self.assertEqual(response['Location'], expected_redirect)
        # Jane logins
        self.c.login(username='jane', password='toto')
        """If successful, Service Provider asks her if she approves granting printer.example.com
        access to her private photos.
        """
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.content.startswith(
            'Fake authorize view for printer.example.com with params: oauth_token='))
        # Jane approves the request.
        self.assertEqual(token.is_approved, 0) # token is not approved yet
        parameters['authorize_access'] = 1
        response = self.c.post("/oauth/authorize/", parameters)
        # The Service Provider redirects her back to the Consumer's callback URL
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].startswith(
            'http://printer.example.com/request_token_ready?oauth_verifier='))
        self.assertTrue('oauth_token=' in response['Location'])
        token = self._last_created_request_token() # get from the DB updated token
        self.assertTrue(token.is_approved)
        """
        Obtaining an Access Token
        """
        """Now that the Consumer knows Jane approved the Request Token,
        it asks the Service Provider to exchange it for an Access Token
        """
        # reset Client
        self.c = Client()
        parameters = self._make_access_token_parameters(token)
        response = self.c.get("/oauth/access_token/", parameters)
        """The Service Provider checks the signature and replies with an
        Access Token in the body of the HTTP response
        """
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        access_token = list(Token.objects.filter(token_type=Token.ACCESS))[-1]
        self.assertEqual(response_params['oauth_token'][0], access_token.key)
        self.assertEqual(response_params['oauth_token_secret'][0], access_token.secret)
        self.assertEqual(access_token.user.username, 'jane')
        """
        Accessing protected resources
        """
        """The Consumer is now ready to request the private photo.
        Since the photo URL is not secure (HTTP), it must use HMAC-SHA1.
        """
        """ Generating Signature Base String
        To generate the signature, it first needs to generate the Signature Base String.
        The request contains the following parameters (oauth_signature excluded)
        which are ordered and concatenated into a normalized string
        """
        parameters = self._make_protected_access_parameters(access_token)
        """ Calculating Signature Value
        HMAC-SHA1 produces the following digest value as a base64-encoded string
        (using the Signature Base String as text and self.CONSUMER_SECRET as key)
        """
        oauth_request = oauth.Request.from_token_and_callback(access_token,
            http_url='http://testserver/oauth/photo/',
            parameters=parameters)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        signature = signature_method.sign(oauth_request, self.consumer, access_token)
        """ Requesting Protected Resource
        All together, the Consumer request for the photo is:
        """
        parameters['oauth_signature'] = signature
        response = self.c.get("/oauth/photo/", parameters)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Protected Resource access!')
        """ Revoking Access
        If Jane deletes the Access Token of printer.example.com,
        the Consumer will not be able to access the Protected Resource anymore
        """
        access_token.delete()
        # Note that an "Invalid signature" error will be raised here if the
        # token is not revoked by Jane because we reuse a previously used one.
        parameters['oauth_signature'] = signature
        parameters['oauth_nonce'] = 'yetanotheraccessscopenonce'
        response = self.c.get(self.scope.url, parameters)
        self.assertEqual(response.status_code, 401)
        self.assertTrue(response.content.startswith('Invalid access token:'))
| apache-2.0 |
jfhumann/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/constants.py | 963 | 87346 | from __future__ import absolute_import, division, unicode_literals
import string
import gettext
_ = gettext.gettext
EOF = None
# E maps parse-error codes (emitted by the html5lib tokenizer and the
# tree-construction phase) to human-readable message templates.  Values
# are wrapped in gettext (`_`) for translation and may contain named
# %-style placeholders (e.g. "%(name)s", "%(charAsInt)08x") interpolated
# with details of the offending token when the error is reported.
E = {
    "null-character":
        _("Null character in input stream, replaced with U+FFFD."),
    "invalid-codepoint":
        _("Invalid codepoint in stream."),
    "incorrectly-placed-solidus":
        _("Solidus (/) incorrectly placed in tag."),
    "incorrect-cr-newline-entity":
        _("Incorrect CR newline entity, replaced with LF."),
    "illegal-windows-1252-entity":
        _("Entity used with illegal number (windows-1252 reference)."),
    "cant-convert-numeric-entity":
        _("Numeric entity couldn't be converted to character "
          "(codepoint U+%(charAsInt)08x)."),
    "illegal-codepoint-for-numeric-entity":
        _("Numeric entity represents an illegal codepoint: "
          "U+%(charAsInt)08x."),
    "numeric-entity-without-semicolon":
        _("Numeric entity didn't end with ';'."),
    "expected-numeric-entity-but-got-eof":
        _("Numeric entity expected. Got end of file instead."),
    "expected-numeric-entity":
        _("Numeric entity expected but none found."),
    "named-entity-without-semicolon":
        _("Named entity didn't end with ';'."),
    "expected-named-entity":
        _("Named entity expected. Got none."),
    "attributes-in-end-tag":
        _("End tag contains unexpected attributes."),
    'self-closing-flag-on-end-tag':
        _("End tag contains unexpected self-closing flag."),
    "expected-tag-name-but-got-right-bracket":
        _("Expected tag name. Got '>' instead."),
    "expected-tag-name-but-got-question-mark":
        _("Expected tag name. Got '?' instead. (HTML doesn't "
          "support processing instructions.)"),
    "expected-tag-name":
        _("Expected tag name. Got something else instead"),
    "expected-closing-tag-but-got-right-bracket":
        _("Expected closing tag. Got '>' instead. Ignoring '</>'."),
    "expected-closing-tag-but-got-eof":
        _("Expected closing tag. Unexpected end of file."),
    "expected-closing-tag-but-got-char":
        _("Expected closing tag. Unexpected character '%(data)s' found."),
    "eof-in-tag-name":
        _("Unexpected end of file in the tag name."),
    "expected-attribute-name-but-got-eof":
        _("Unexpected end of file. Expected attribute name instead."),
    "eof-in-attribute-name":
        _("Unexpected end of file in attribute name."),
    "invalid-character-in-attribute-name":
        _("Invalid character in attribute name"),
    "duplicate-attribute":
        _("Dropped duplicate attribute on tag."),
    "expected-end-of-tag-name-but-got-eof":
        _("Unexpected end of file. Expected = or end of tag."),
    "expected-attribute-value-but-got-eof":
        _("Unexpected end of file. Expected attribute value."),
    "expected-attribute-value-but-got-right-bracket":
        _("Expected attribute value. Got '>' instead."),
    'equals-in-unquoted-attribute-value':
        _("Unexpected = in unquoted attribute"),
    'unexpected-character-in-unquoted-attribute-value':
        _("Unexpected character in unquoted attribute"),
    "invalid-character-after-attribute-name":
        _("Unexpected character after attribute name."),
    "unexpected-character-after-attribute-value":
        _("Unexpected character after attribute value."),
    "eof-in-attribute-value-double-quote":
        _("Unexpected end of file in attribute value (\")."),
    "eof-in-attribute-value-single-quote":
        _("Unexpected end of file in attribute value (')."),
    "eof-in-attribute-value-no-quotes":
        _("Unexpected end of file in attribute value."),
    "unexpected-EOF-after-solidus-in-tag":
        _("Unexpected end of file in tag. Expected >"),
    "unexpected-character-after-solidus-in-tag":
        _("Unexpected character after / in tag. Expected >"),
    "expected-dashes-or-doctype":
        _("Expected '--' or 'DOCTYPE'. Not found."),
    "unexpected-bang-after-double-dash-in-comment":
        _("Unexpected ! after -- in comment"),
    "unexpected-space-after-double-dash-in-comment":
        _("Unexpected space after -- in comment"),
    "incorrect-comment":
        _("Incorrect comment."),
    "eof-in-comment":
        _("Unexpected end of file in comment."),
    "eof-in-comment-end-dash":
        _("Unexpected end of file in comment (-)"),
    "unexpected-dash-after-double-dash-in-comment":
        _("Unexpected '-' after '--' found in comment."),
    "eof-in-comment-double-dash":
        _("Unexpected end of file in comment (--)."),
    "eof-in-comment-end-space-state":
        _("Unexpected end of file in comment."),
    "eof-in-comment-end-bang-state":
        _("Unexpected end of file in comment."),
    "unexpected-char-in-comment":
        _("Unexpected character in comment found."),
    "need-space-after-doctype":
        _("No space after literal string 'DOCTYPE'."),
    "expected-doctype-name-but-got-right-bracket":
        _("Unexpected > character. Expected DOCTYPE name."),
    "expected-doctype-name-but-got-eof":
        _("Unexpected end of file. Expected DOCTYPE name."),
    "eof-in-doctype-name":
        _("Unexpected end of file in DOCTYPE name."),
    "eof-in-doctype":
        _("Unexpected end of file in DOCTYPE."),
    "expected-space-or-right-bracket-in-doctype":
        _("Expected space or '>'. Got '%(data)s'"),
    "unexpected-end-of-doctype":
        _("Unexpected end of DOCTYPE."),
    "unexpected-char-in-doctype":
        _("Unexpected character in DOCTYPE."),
    "eof-in-innerhtml":
        _("XXX innerHTML EOF"),
    "unexpected-doctype":
        _("Unexpected DOCTYPE. Ignored."),
    "non-html-root":
        _("html needs to be the first start tag."),
    "expected-doctype-but-got-eof":
        _("Unexpected End of file. Expected DOCTYPE."),
    "unknown-doctype":
        _("Erroneous DOCTYPE."),
    "expected-doctype-but-got-chars":
        _("Unexpected non-space characters. Expected DOCTYPE."),
    "expected-doctype-but-got-start-tag":
        _("Unexpected start tag (%(name)s). Expected DOCTYPE."),
    "expected-doctype-but-got-end-tag":
        _("Unexpected end tag (%(name)s). Expected DOCTYPE."),
    "end-tag-after-implied-root":
        _("Unexpected end tag (%(name)s) after the (implied) root element."),
    "expected-named-closing-tag-but-got-eof":
        _("Unexpected end of file. Expected end tag (%(name)s)."),
    "two-heads-are-not-better-than-one":
        _("Unexpected start tag head in existing head. Ignored."),
    "unexpected-end-tag":
        _("Unexpected end tag (%(name)s). Ignored."),
    "unexpected-start-tag-out-of-my-head":
        _("Unexpected start tag (%(name)s) that can be in head. Moved."),
    "unexpected-start-tag":
        _("Unexpected start tag (%(name)s)."),
    "missing-end-tag":
        _("Missing end tag (%(name)s)."),
    "missing-end-tags":
        _("Missing end tags (%(name)s)."),
    "unexpected-start-tag-implies-end-tag":
        _("Unexpected start tag (%(startName)s) "
          "implies end tag (%(endName)s)."),
    "unexpected-start-tag-treated-as":
        _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
    "deprecated-tag":
        _("Unexpected start tag %(name)s. Don't use it!"),
    "unexpected-start-tag-ignored":
        _("Unexpected start tag %(name)s. Ignored."),
    "expected-one-end-tag-but-got-another":
        _("Unexpected end tag (%(gotName)s). "
          "Missing end tag (%(expectedName)s)."),
    "end-tag-too-early":
        _("End tag (%(name)s) seen too early. Expected other end tag."),
    "end-tag-too-early-named":
        _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
    "end-tag-too-early-ignored":
        _("End tag (%(name)s) seen too early. Ignored."),
    "adoption-agency-1.1":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 1 of the adoption agency algorithm."),
    "adoption-agency-1.2":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 2 of the adoption agency algorithm."),
    "adoption-agency-1.3":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 3 of the adoption agency algorithm."),
    "adoption-agency-4.4":
        _("End tag (%(name)s) violates step 4, "
          "paragraph 4 of the adoption agency algorithm."),
    "unexpected-end-tag-treated-as":
        _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
    "no-end-tag":
        _("This element (%(name)s) has no end tag."),
    "unexpected-implied-end-tag-in-table":
        _("Unexpected implied end tag (%(name)s) in the table phase."),
    "unexpected-implied-end-tag-in-table-body":
        _("Unexpected implied end tag (%(name)s) in the table body phase."),
    "unexpected-char-implies-table-voodoo":
        _("Unexpected non-space characters in "
          "table context caused voodoo mode."),
    "unexpected-hidden-input-in-table":
        _("Unexpected input with type hidden in table context."),
    "unexpected-form-in-table":
        _("Unexpected form in table context."),
    "unexpected-start-tag-implies-table-voodoo":
        _("Unexpected start tag (%(name)s) in "
          "table context caused voodoo mode."),
    "unexpected-end-tag-implies-table-voodoo":
        _("Unexpected end tag (%(name)s) in "
          "table context caused voodoo mode."),
    "unexpected-cell-in-table-body":
        _("Unexpected table cell start tag (%(name)s) "
          "in the table body phase."),
    "unexpected-cell-end-tag":
        _("Got table cell end tag (%(name)s) "
          "while required end tags are missing."),
    "unexpected-end-tag-in-table-body":
        _("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
    "unexpected-implied-end-tag-in-table-row":
        _("Unexpected implied end tag (%(name)s) in the table row phase."),
    "unexpected-end-tag-in-table-row":
        _("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
    "unexpected-select-in-select":
        _("Unexpected select start tag in the select phase "
          "treated as select end tag."),
    "unexpected-input-in-select":
        _("Unexpected input start tag in the select phase."),
    "unexpected-start-tag-in-select":
        _("Unexpected start tag token (%(name)s in the select phase. "
          "Ignored."),
    "unexpected-end-tag-in-select":
        _("Unexpected end tag (%(name)s) in the select phase. Ignored."),
    "unexpected-table-element-start-tag-in-select-in-table":
        _("Unexpected table element start tag (%(name)s) in the select in table phase."),
    "unexpected-table-element-end-tag-in-select-in-table":
        _("Unexpected table element end tag (%(name)s) in the select in table phase."),
    "unexpected-char-after-body":
        _("Unexpected non-space characters in the after body phase."),
    "unexpected-start-tag-after-body":
        _("Unexpected start tag token (%(name)s)"
          " in the after body phase."),
    "unexpected-end-tag-after-body":
        _("Unexpected end tag token (%(name)s)"
          " in the after body phase."),
    "unexpected-char-in-frameset":
        _("Unexpected characters in the frameset phase. Characters ignored."),
    "unexpected-start-tag-in-frameset":
        _("Unexpected start tag token (%(name)s)"
          " in the frameset phase. Ignored."),
    "unexpected-frameset-in-frameset-innerhtml":
        _("Unexpected end tag token (frameset) "
          "in the frameset phase (innerHTML)."),
    "unexpected-end-tag-in-frameset":
        _("Unexpected end tag token (%(name)s)"
          " in the frameset phase. Ignored."),
    "unexpected-char-after-frameset":
        _("Unexpected non-space characters in the "
          "after frameset phase. Ignored."),
    "unexpected-start-tag-after-frameset":
        _("Unexpected start tag (%(name)s)"
          " in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-frameset":
        _("Unexpected end tag (%(name)s)"
          " in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-body-innerhtml":
        _("Unexpected end tag after body(innerHtml)"),
    "expected-eof-but-got-char":
        _("Unexpected non-space characters. Expected end of file."),
    "expected-eof-but-got-start-tag":
        _("Unexpected start tag (%(name)s)"
          ". Expected end of file."),
    "expected-eof-but-got-end-tag":
        _("Unexpected end tag (%(name)s)"
          ". Expected end of file."),
    "eof-in-table":
        _("Unexpected end of file. Expected table content."),
    "eof-in-select":
        _("Unexpected end of file. Expected select content."),
    "eof-in-frameset":
        _("Unexpected end of file. Expected frameset content."),
    "eof-in-script-in-script":
        _("Unexpected end of file. Expected script content."),
    "eof-in-foreign-lands":
        _("Unexpected end of file. Expected foreign content"),
    "non-void-element-with-trailing-solidus":
        _("Trailing solidus not allowed on element %(name)s"),
    "unexpected-html-element-in-foreign-content":
        _("Element %(name)s not allowed in a non-html context"),
    "unexpected-end-tag-before-html":
        _("Unexpected end tag (%(name)s) before html."),
    "XXX-undefined-error":
        _("Undefined error (this sucks and should be fixed)"),
}
# Canonical namespace URIs, keyed by the short prefix used throughout the
# parser and serializer.
namespaces = dict(
    html="http://www.w3.org/1999/xhtml",
    mathml="http://www.w3.org/1998/Math/MathML",
    svg="http://www.w3.org/2000/svg",
    xlink="http://www.w3.org/1999/xlink",
    xml="http://www.w3.org/XML/1998/namespace",
    xmlns="http://www.w3.org/2000/xmlns/",
)
# Elements that establish a new scope for "has an element in scope" checks
# during tree construction, as (namespace, local name) pairs.
scopingElements = frozenset(
    [(namespaces["html"], tag) for tag in (
        "applet", "caption", "html", "marquee", "object",
        "table", "td", "th")] +
    [(namespaces["mathml"], tag) for tag in (
        "mi", "mo", "mn", "ms", "mtext", "annotation-xml")] +
    [(namespaces["svg"], tag) for tag in (
        "foreignObject", "desc", "title")]
)
# HTML formatting elements (the set the "active formatting elements" list
# tracks), as (namespace, local name) pairs.
formattingElements = frozenset(
    (namespaces["html"], tag) for tag in (
        "a", "b", "big", "code", "em", "font", "i", "nobr",
        "s", "small", "strike", "strong", "tt", "u"))
# Elements in the "special" category of the tree-construction algorithm,
# as (namespace, local name) pairs.
specialElements = frozenset(
    [(namespaces["html"], tag) for tag in (
        "address", "applet", "area", "article", "aside", "base",
        "basefont", "bgsound", "blockquote", "body", "br", "button",
        "caption", "center", "col", "colgroup", "command", "dd",
        "details", "dir", "div", "dl", "dt", "embed", "fieldset",
        "figure", "footer", "form", "frame", "frameset", "h1", "h2",
        "h3", "h4", "h5", "h6", "head", "header", "hr", "html",
        "iframe",
        # Note that image is commented out in the spec as "this isn't an
        # element that can end up on the stack, so it doesn't matter,"
        "image", "img", "input", "isindex", "li", "link", "listing",
        "marquee", "menu", "meta", "nav", "noembed", "noframes",
        "noscript", "object", "ol", "p", "param", "plaintext", "pre",
        "script", "section", "select", "style", "table", "tbody", "td",
        "textarea", "tfoot", "th", "thead", "title", "tr", "ul", "wbr",
        "xmp")] +
    [(namespaces["svg"], "foreignObject")]
)
# Elements that act as HTML integration points: content inside them is
# processed with ordinary HTML rules rather than foreign-content rules.
# Bug fix: the original entry read "annotaion-xml", a typo for the MathML
# annotation-xml element (also fixed in upstream html5lib), so that
# integration point was never matched.
htmlIntegrationPointElements = frozenset((
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title")
))
# MathML text integration points, as (namespace, local name) pairs.
mathmlTextIntegrationPointElements = frozenset(
    (namespaces["mathml"], tag)
    for tag in ("mi", "mo", "mn", "ms", "mtext"))
# Foreign-content attributes that must be adjusted into a namespace:
# qualified name -> (prefix, local name, namespace URI).  Built with loops
# so each namespace's attribute list appears exactly once; insertion order
# matches the original literal (xlink, then xml, then xmlns entries).
adjustForeignAttributes = {}
for _local in ("actuate", "arcrole", "href", "role", "show", "title", "type"):
    adjustForeignAttributes["xlink:" + _local] = ("xlink", _local, namespaces["xlink"])
for _local in ("base", "lang", "space"):
    adjustForeignAttributes["xml:" + _local] = ("xml", _local, namespaces["xml"])
adjustForeignAttributes["xmlns"] = (None, "xmlns", namespaces["xmlns"])
adjustForeignAttributes["xmlns:xlink"] = ("xmlns", "xlink", namespaces["xmlns"])
del _local
# Reverse mapping of adjustForeignAttributes:
# (namespace URI, local name) -> qualified attribute name.
unadjustForeignAttributes = dict(
    ((ns, local), qname)
    for qname, (prefix, local, ns) in adjustForeignAttributes.items())
# The five characters the HTML spec classes as "space characters"
# (tab, line feed, form feed, space, carriage return); iterating the
# string yields exactly those one-character members.
spaceCharacters = frozenset("\t\n\u000C \r")
# Elements whose presence on the stack selects a table-related
# insertion mode.
tableInsertModeElements = frozenset(
    "table tbody tfoot thead tr".split())
# Character-class sets used by the tokenizer, derived from the stdlib
# string module.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = asciiLowercase | asciiUppercase
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
# str.translate-style table mapping uppercase ASCII code points to their
# lowercase counterparts (A-Z and a-z are parallel, so zip lines them up).
asciiUpper2Lower = dict(zip(map(ord, string.ascii_uppercase),
                            map(ord, string.ascii_lowercase)))
# Heading elements need to be ordered, so this stays a tuple (h1..h6 in
# rank order) rather than a frozenset.
headingElements = tuple("h%d" % rank for rank in range(1, 7))
# Void elements: they never have content and take no end tag.
voidElements = frozenset((
    "base command event-source link meta hr br img embed "
    "param area col input source track").split())
# NOTE(review): the names here are historical and look swapped relative to
# the current spec terms (title/textarea are RCDATA, the others RAWTEXT);
# preserved as-is because other modules key off these names.
cdataElements = frozenset("title textarea".split())
rcdataElements = frozenset(
    "style script xmp iframe noembed noframes noscript".split())
# Boolean attributes per element name ("" applies to every element),
# presumably consulted when minimizing attributes during serialization --
# TODO confirm against the serializer module.
booleanAttributes = {
    "": frozenset(("irrelevant",)),
    "style": frozenset(("scoped",)),
    "img": frozenset(("ismap",)),
    "audio": frozenset(("autoplay", "controls")),
    "video": frozenset(("autoplay", "controls")),
    "script": frozenset(("defer", "async")),
    "details": frozenset(("open",)),
    "datagrid": frozenset(("multiple", "disabled")),
    "command": frozenset(("hidden", "disabled", "checked", "default")),
    # Bug fix: frozenset(("noshade")) iterated the *string* (a parenthesized
    # expression, not a tuple), yielding {'n','o','s','h','a','d','e'}.
    # The trailing comma makes it the intended one-element tuple.
    "hr": frozenset(("noshade",)),
    "menu": frozenset(("autosubmit",)),
    "fieldset": frozenset(("disabled", "readonly")),
    "option": frozenset(("disabled", "readonly", "selected")),
    "optgroup": frozenset(("disabled", "readonly")),
    "button": frozenset(("disabled", "autofocus")),
    "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
    "select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
    "output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
# Index i gives the Unicode code point that Windows-1252 assigns to byte
# 0x80 + i; 65533 (U+FFFD REPLACEMENT CHARACTER) marks the positions
# Windows-1252 leaves undefined.  NOTE(review): presumably used to remap
# character references in the 0x80-0x9F range -- confirm in the tokenizer.
entitiesWindows1252 = (
    8364,   # 0x80  0x20AC  EURO SIGN
    65533,  # 0x81          UNDEFINED
    8218,   # 0x82  0x201A  SINGLE LOW-9 QUOTATION MARK
    402,    # 0x83  0x0192  LATIN SMALL LETTER F WITH HOOK
    8222,   # 0x84  0x201E  DOUBLE LOW-9 QUOTATION MARK
    8230,   # 0x85  0x2026  HORIZONTAL ELLIPSIS
    8224,   # 0x86  0x2020  DAGGER
    8225,   # 0x87  0x2021  DOUBLE DAGGER
    710,    # 0x88  0x02C6  MODIFIER LETTER CIRCUMFLEX ACCENT
    8240,   # 0x89  0x2030  PER MILLE SIGN
    352,    # 0x8A  0x0160  LATIN CAPITAL LETTER S WITH CARON
    8249,   # 0x8B  0x2039  SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    338,    # 0x8C  0x0152  LATIN CAPITAL LIGATURE OE
    65533,  # 0x8D          UNDEFINED
    381,    # 0x8E  0x017D  LATIN CAPITAL LETTER Z WITH CARON
    65533,  # 0x8F          UNDEFINED
    65533,  # 0x90          UNDEFINED
    8216,   # 0x91  0x2018  LEFT SINGLE QUOTATION MARK
    8217,   # 0x92  0x2019  RIGHT SINGLE QUOTATION MARK
    8220,   # 0x93  0x201C  LEFT DOUBLE QUOTATION MARK
    8221,   # 0x94  0x201D  RIGHT DOUBLE QUOTATION MARK
    8226,   # 0x95  0x2022  BULLET
    8211,   # 0x96  0x2013  EN DASH
    8212,   # 0x97  0x2014  EM DASH
    732,    # 0x98  0x02DC  SMALL TILDE
    8482,   # 0x99  0x2122  TRADE MARK SIGN
    353,    # 0x9A  0x0161  LATIN SMALL LETTER S WITH CARON
    8250,   # 0x9B  0x203A  SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    339,    # 0x9C  0x0153  LATIN SMALL LIGATURE OE
    65533,  # 0x9D          UNDEFINED
    382,    # 0x9E  0x017E  LATIN SMALL LETTER Z WITH CARON
    376     # 0x9F  0x0178  LATIN CAPITAL LETTER Y WITH DIAERESIS
)
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
# Windows-1252 compatibility mapping, per the HTML5 spec: when a numeric
# character reference resolves to one of these code points (NULL, CR, or a
# C1 control in 0x80-0x9F), the parser substitutes the character a legacy
# windows-1252 document actually meant.
# Fix: the original listed the 0x81 entry twice; duplicate dict keys are
# silently collapsed, so the redundant line is removed.
replacementCharacters = {
    0x0: "\uFFFD",   # NULL -> U+FFFD REPLACEMENT CHARACTER
    0x0d: "\u000D",  # CR maps to itself
    0x80: "\u20AC",
    0x81: "\u0081",
    0x82: "\u201A",
    0x83: "\u0192",
    0x84: "\u201E",
    0x85: "\u2026",
    0x86: "\u2020",
    0x87: "\u2021",
    0x88: "\u02C6",
    0x89: "\u2030",
    0x8A: "\u0160",
    0x8B: "\u2039",
    0x8C: "\u0152",
    0x8D: "\u008D",
    0x8E: "\u017D",
    0x8F: "\u008F",
    0x90: "\u0090",
    0x91: "\u2018",
    0x92: "\u2019",
    0x93: "\u201C",
    0x94: "\u201D",
    0x95: "\u2022",
    0x96: "\u2013",
    0x97: "\u2014",
    0x98: "\u02DC",
    0x99: "\u2122",
    0x9A: "\u0161",
    0x9B: "\u203A",
    0x9C: "\u0153",
    0x9D: "\u009D",
    0x9E: "\u017E",
    0x9F: "\u0178",
}
# Lookup table from a normalized (lowercased, punctuation-stripped) encoding
# label, as found in e.g. a <meta charset> declaration, to the name of the
# Python codec that should actually be used to decode the document.
# NOTE(review): several entries deliberately do not map to the codec of the
# same name -- e.g. 'iso88591'/'latin1' map to 'windows-1252' -- presumably
# following the HTML5 rule that legacy latin-1 content is decoded as
# windows-1252; confirm against the spec before changing any mapping.
encodings = {
    '437': 'cp437',
    '850': 'cp850',
    '852': 'cp852',
    '855': 'cp855',
    '857': 'cp857',
    '860': 'cp860',
    '861': 'cp861',
    '862': 'cp862',
    '863': 'cp863',
    '865': 'cp865',
    '866': 'cp866',
    '869': 'cp869',
    'ansix341968': 'ascii',
    'ansix341986': 'ascii',
    'arabic': 'iso8859-6',
    'ascii': 'ascii',
    'asmo708': 'iso8859-6',
    'big5': 'big5',
    'big5hkscs': 'big5hkscs',
    'chinese': 'gbk',
    'cp037': 'cp037',
    'cp1026': 'cp1026',
    'cp154': 'ptcp154',
    'cp367': 'ascii',
    'cp424': 'cp424',
    'cp437': 'cp437',
    'cp500': 'cp500',
    'cp775': 'cp775',
    'cp819': 'windows-1252',
    'cp850': 'cp850',
    'cp852': 'cp852',
    'cp855': 'cp855',
    'cp857': 'cp857',
    'cp860': 'cp860',
    'cp861': 'cp861',
    'cp862': 'cp862',
    'cp863': 'cp863',
    'cp864': 'cp864',
    'cp865': 'cp865',
    'cp866': 'cp866',
    'cp869': 'cp869',
    'cp936': 'gbk',
    'cpgr': 'cp869',
    'cpis': 'cp861',
    'csascii': 'ascii',
    'csbig5': 'big5',
    'cseuckr': 'cp949',
    'cseucpkdfmtjapanese': 'euc_jp',
    'csgb2312': 'gbk',
    'cshproman8': 'hp-roman8',
    'csibm037': 'cp037',
    'csibm1026': 'cp1026',
    'csibm424': 'cp424',
    'csibm500': 'cp500',
    'csibm855': 'cp855',
    'csibm857': 'cp857',
    'csibm860': 'cp860',
    'csibm861': 'cp861',
    'csibm863': 'cp863',
    'csibm864': 'cp864',
    'csibm865': 'cp865',
    'csibm866': 'cp866',
    'csibm869': 'cp869',
    'csiso2022jp': 'iso2022_jp',
    'csiso2022jp2': 'iso2022_jp_2',
    'csiso2022kr': 'iso2022_kr',
    'csiso58gb231280': 'gbk',
    'csisolatin1': 'windows-1252',
    'csisolatin2': 'iso8859-2',
    'csisolatin3': 'iso8859-3',
    'csisolatin4': 'iso8859-4',
    'csisolatin5': 'windows-1254',
    'csisolatin6': 'iso8859-10',
    'csisolatinarabic': 'iso8859-6',
    'csisolatincyrillic': 'iso8859-5',
    'csisolatingreek': 'iso8859-7',
    'csisolatinhebrew': 'iso8859-8',
    'cskoi8r': 'koi8-r',
    'csksc56011987': 'cp949',
    'cspc775baltic': 'cp775',
    'cspc850multilingual': 'cp850',
    'cspc862latinhebrew': 'cp862',
    'cspc8codepage437': 'cp437',
    'cspcp852': 'cp852',
    'csptcp154': 'ptcp154',
    'csshiftjis': 'shift_jis',
    'csunicode11utf7': 'utf-7',
    'cyrillic': 'iso8859-5',
    'cyrillicasian': 'ptcp154',
    'ebcdiccpbe': 'cp500',
    'ebcdiccpca': 'cp037',
    'ebcdiccpch': 'cp500',
    'ebcdiccphe': 'cp424',
    'ebcdiccpnl': 'cp037',
    'ebcdiccpus': 'cp037',
    'ebcdiccpwt': 'cp037',
    'ecma114': 'iso8859-6',
    'ecma118': 'iso8859-7',
    'elot928': 'iso8859-7',
    'eucjp': 'euc_jp',
    'euckr': 'cp949',
    'extendedunixcodepackedformatforjapanese': 'euc_jp',
    'gb18030': 'gb18030',
    'gb2312': 'gbk',
    'gb231280': 'gbk',
    'gbk': 'gbk',
    'greek': 'iso8859-7',
    'greek8': 'iso8859-7',
    'hebrew': 'iso8859-8',
    'hproman8': 'hp-roman8',
    'hzgb2312': 'hz',
    'ibm037': 'cp037',
    'ibm1026': 'cp1026',
    'ibm367': 'ascii',
    'ibm424': 'cp424',
    'ibm437': 'cp437',
    'ibm500': 'cp500',
    'ibm775': 'cp775',
    'ibm819': 'windows-1252',
    'ibm850': 'cp850',
    'ibm852': 'cp852',
    'ibm855': 'cp855',
    'ibm857': 'cp857',
    'ibm860': 'cp860',
    'ibm861': 'cp861',
    'ibm862': 'cp862',
    'ibm863': 'cp863',
    'ibm864': 'cp864',
    'ibm865': 'cp865',
    'ibm866': 'cp866',
    'ibm869': 'cp869',
    'iso2022jp': 'iso2022_jp',
    'iso2022jp2': 'iso2022_jp_2',
    'iso2022kr': 'iso2022_kr',
    'iso646irv1991': 'ascii',
    'iso646us': 'ascii',
    'iso88591': 'windows-1252',
    'iso885910': 'iso8859-10',
    'iso8859101992': 'iso8859-10',
    'iso885911987': 'windows-1252',
    'iso885913': 'iso8859-13',
    'iso885914': 'iso8859-14',
    'iso8859141998': 'iso8859-14',
    'iso885915': 'iso8859-15',
    'iso885916': 'iso8859-16',
    'iso8859162001': 'iso8859-16',
    'iso88592': 'iso8859-2',
    'iso885921987': 'iso8859-2',
    'iso88593': 'iso8859-3',
    'iso885931988': 'iso8859-3',
    'iso88594': 'iso8859-4',
    'iso885941988': 'iso8859-4',
    'iso88595': 'iso8859-5',
    'iso885951988': 'iso8859-5',
    'iso88596': 'iso8859-6',
    'iso885961987': 'iso8859-6',
    'iso88597': 'iso8859-7',
    'iso885971987': 'iso8859-7',
    'iso88598': 'iso8859-8',
    'iso885981988': 'iso8859-8',
    'iso88599': 'windows-1254',
    'iso885991989': 'windows-1254',
    'isoceltic': 'iso8859-14',
    'isoir100': 'windows-1252',
    'isoir101': 'iso8859-2',
    'isoir109': 'iso8859-3',
    'isoir110': 'iso8859-4',
    'isoir126': 'iso8859-7',
    'isoir127': 'iso8859-6',
    'isoir138': 'iso8859-8',
    'isoir144': 'iso8859-5',
    'isoir148': 'windows-1254',
    'isoir149': 'cp949',
    'isoir157': 'iso8859-10',
    'isoir199': 'iso8859-14',
    'isoir226': 'iso8859-16',
    'isoir58': 'gbk',
    'isoir6': 'ascii',
    'koi8r': 'koi8-r',
    'koi8u': 'koi8-u',
    'korean': 'cp949',
    'ksc5601': 'cp949',
    'ksc56011987': 'cp949',
    'ksc56011989': 'cp949',
    'l1': 'windows-1252',
    'l10': 'iso8859-16',
    'l2': 'iso8859-2',
    'l3': 'iso8859-3',
    'l4': 'iso8859-4',
    'l5': 'windows-1254',
    'l6': 'iso8859-10',
    'l8': 'iso8859-14',
    'latin1': 'windows-1252',
    'latin10': 'iso8859-16',
    'latin2': 'iso8859-2',
    'latin3': 'iso8859-3',
    'latin4': 'iso8859-4',
    'latin5': 'windows-1254',
    'latin6': 'iso8859-10',
    'latin8': 'iso8859-14',
    'latin9': 'iso8859-15',
    'ms936': 'gbk',
    'mskanji': 'shift_jis',
    'pt154': 'ptcp154',
    'ptcp154': 'ptcp154',
    'r8': 'hp-roman8',
    'roman8': 'hp-roman8',
    'shiftjis': 'shift_jis',
    'tis620': 'cp874',
    'unicode11utf7': 'utf-7',
    'us': 'ascii',
    'usascii': 'ascii',
    'utf16': 'utf-16',
    'utf16be': 'utf-16-be',
    'utf16le': 'utf-16-le',
    'utf8': 'utf-8',
    'windows1250': 'cp1250',
    'windows1251': 'cp1251',
    'windows1252': 'cp1252',
    'windows1253': 'cp1253',
    'windows1254': 'cp1254',
    'windows1255': 'cp1255',
    'windows1256': 'cp1256',
    'windows1257': 'cp1257',
    'windows1258': 'cp1258',
    'windows936': 'gbk',
    'x-x-big5': 'big5'}
# Integer codes for each kind of token the tokenizer can emit.  The codes
# are assigned sequentially, so the mapping is built from an ordered list
# of names rather than spelled out pair by pair.
tokenTypes = {
    name: code
    for code, name in enumerate([
        "Doctype",
        "Characters",
        "SpaceCharacters",
        "StartTag",
        "EndTag",
        "EmptyTag",
        "Comment",
        "ParseError",
    ])
}

# The subset of token types that represent tags (and therefore carry a tag
# name and attributes).
tagTokenTypes = frozenset(
    tokenTypes[kind] for kind in ("StartTag", "EndTag", "EmptyTag")
)
# Reverse lookup of `namespaces`: maps a namespace URI back to its prefix.
# The MathML URI is then pinned to the short "math" prefix, overriding
# whatever key it has in `namespaces`.
prefixes = {uri: prefix for prefix, uri in namespaces.items()}
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning category signalling that some input data may be lost."""
class ReparseException(Exception):
    """Signals that the current parse must be abandoned and restarted."""
| mpl-2.0 |
ddcampayo/ddcampayo.github.io | cursos_previos/Curso_CFD_OS/Exploring-OpenFOAM-master/laminarVortexShedding/strouhal.py | 3 | 1531 | #!/usr/bin/python
# Comflics: Exploring OpenFOAM
# Compute Strouhal Number of Laminar Vortex Shedding
# S. Huq, 13MAY17
#
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
# # Read Results
# Force-coefficient history written by OpenFOAM's forceCoeffs function object:
# column 0 = time, column 2 = Cd, column 3 = Cl.
data = np.loadtxt('./postProcessing/forceCoeffs/0/forceCoeffs.dat', skiprows=0)

L = 2  # L = D - Diameter (characteristic length for the Strouhal number)
V = 1  # Free-stream velocity

time = data[:, 0]
Cd = data[:, 2]
Cl = data[:, 3]
del data  # the raw table is no longer needed once the columns are extracted

# # Compute FFT
N = len(time)            # number of samples (used by the raw-FFT variant below)
dt = time[2] - time[1]   # sampling interval; assumes a uniform time step

# # inaccurate FFT
# freq = np.fft.fftfreq(N, dt)
# Cd_fft = np.fft.fft(Cd)
# Cl_amp = np.fft.fft(Cl)
# plt.plot(freq, Cl_amp) # Figure 2.10
# plt.show()

# # Better stable FFT: Welch's method averages periodograms over overlapping
# # segments, which suppresses noise in the spectrum.
nmax = 512  # no. of points in the fft
# freq, Cd_amp = signal.welch(Cd, 1./dt, nperseg=nmax)
freq, Cl_amp = signal.welch(Cl, 1./dt, nperseg=nmax)

plt.plot(freq, Cl_amp) # Figure 2.10
plt.show()

# # Strouhal Number
# Find the index corresponding to max amplitude of the lift spectrum; the
# dominant lift frequency is the vortex-shedding frequency.
Cl_max_fft_idx = np.argmax(abs(Cl_amp))
freq_shed = freq[Cl_max_fft_idx]

St = freq_shed * L / V

# print() with a single parenthesized argument behaves identically under
# Python 2 and Python 3; the bare print statement was Python-2-only.
print("Vortex shedding freq: %.3f [Hz]" % (freq_shed))
print("Strouhal Number: %.3f" % (St))

# # Explore Results
# #
# # Figure 2.8
# # See if there atleast 10 cycles of oscillation
# # improves the accuracy;
# plt.plot(time,Cl)
# plt.show()

# # Figure 2.9
# plt.plot(time,Cd)
# plt.show()
# #
# # Exercise
# # Exclude data before onset of the oscillations.
# # approx time = 200 s.
# # Hint: skiprows = 800 - 950
| gpl-3.0 |
Mazecreator/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 3 | 18978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pandas is an optional dependency; record its availability in HAS_PANDAS so
# DataFrame support can be enabled conditionally below.
try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  # pandas is simply not installed.
  HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""
Recursively fills padded arr with elements from seq.
If length of seq is less then arr padded length, fillvalue used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
seq: Non-padded list of data sampels of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
Padded with zeros tensor of same type and shape
[batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [seq.shape[:-1] if len(seq.shape) > 0 else -1
for seq in batch_key_item]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [seq.shape[-1] if len(seq.shape) > 0 else 0
for seq in batch_key_item]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
  """Creates feed dictionaries from `OrderedDict`s of numpy arrays."""

  def __init__(self,
               placeholders,
               ordered_dict_of_arrays,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None):
    # One placeholder for the row index plus one per column array.
    if len(placeholders) != len(ordered_dict_of_arrays) + 1:
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(ordered_dict_of_arrays), len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._col_placeholders = placeholders[1:]
    self._ordered_dict_of_arrays = ordered_dict_of_arrays
    self._max = len(next(iter(ordered_dict_of_arrays.values())))
    # All columns must have the same number of rows.
    for _, v in ordered_dict_of_arrays.items():
      if len(v) != self._max:
        raise ValueError("Array lengths must match.")
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    # Seed the global RNG so a random starting row is reproducible.
    random.seed(seed)
    self._trav = random.randrange(self._max) if random_start else 0
    # The row just before the start position marks the end of one epoch.
    self._epoch_end = (self._trav - 1) % self._max

  def __call__(self):
    # Delegates epoch accounting (and the OutOfRangeError at exhaustion) to
    # the shared index helper.
    integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
        batch_indices_start=self._trav,
        batch_size=self._batch_size,
        epoch_end=self._epoch_end,
        array_length=self._max,
        current_epoch=self._epoch,
        total_epochs=self._num_epochs)

    self._trav = (integer_indexes[-1] + 1) % self._max
    feed_dict = {self._index_placeholder: integer_indexes}
    # Fancy-index every column with the same row indices so columns stay
    # aligned within the batch.
    cols = [
        column[integer_indexes]
        for column in self._ordered_dict_of_arrays.values()
    ]
    feed_dict.update(dict(zip(self._col_placeholders, cols)))
    return feed_dict
class _PandasFeedFn(object):
  """Creates feed dictionaries from pandas `DataFrames`."""

  def __init__(self,
               placeholders,
               dataframe,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None):
    # One placeholder for the DataFrame index plus one per column.
    if len(placeholders) != len(dataframe.columns) + 1:
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(dataframe.columns), len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._col_placeholders = placeholders[1:]
    self._dataframe = dataframe
    self._max = len(dataframe)
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    # Seed the global RNG so a random starting row is reproducible.
    random.seed(seed)
    self._trav = random.randrange(self._max) if random_start else 0
    # The row just before the start position marks the end of one epoch.
    self._epoch_end = (self._trav - 1) % self._max

  def __call__(self):
    integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
        batch_indices_start=self._trav,
        batch_size=self._batch_size,
        epoch_end=self._epoch_end,
        array_length=self._max,
        current_epoch=self._epoch,
        total_epochs=self._num_epochs)

    self._trav = (integer_indexes[-1] + 1) % self._max
    # iloc selects by position; the fed index values are the DataFrame's own
    # index labels for those rows, not the integer positions.
    result = self._dataframe.iloc[integer_indexes]
    cols = [result[col].values for col in result.columns]
    feed_dict = dict(zip(self._col_placeholders, cols))
    feed_dict[self._index_placeholder] = result.index.values
    return feed_dict
class _GeneratorFeedFn(object):
  """Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""

  def __init__(self,
               placeholders,
               generator,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None,
               pad_value=None):
    # Peek at one sample from a throwaway generator instance to learn the
    # key schema; the actual iteration uses a fresh generator below.
    first_sample = next(generator())
    if len(placeholders) != len(first_sample):
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(first_sample), len(placeholders)))
    # Keys are sorted so their order lines up with the placeholder order.
    self._keys = sorted(list(first_sample.keys()))
    self._col_placeholders = placeholders
    self._generator_function = generator
    self._iterator = generator()
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    self._pad_value = pad_value
    random.seed(seed)

  def __call__(self):
    # A falsy num_epochs (None or 0) means no epoch limit.
    if self._num_epochs and self._epoch >= self._num_epochs:
      raise errors.OutOfRangeError(None, None,
                                   "Already emitted %s epochs." % self._epoch)

    # Accumulate rows per placeholder until a full batch is collected,
    # restarting the generator (and counting an epoch) on exhaustion.
    list_dict = {}
    list_dict_size = 0
    while list_dict_size < self._batch_size:
      try:
        data_row = next(self._iterator)
      except StopIteration:
        self._epoch += 1
        self._iterator = self._generator_function()
        data_row = next(self._iterator)
      for index, key in enumerate(self._keys):
        if key not in data_row.keys():
          raise KeyError("key mismatch between dicts emitted by GenFun "
                         "Expected {} keys; got {}".format(
                             self._keys, data_row.keys()))
        list_dict.setdefault(self._col_placeholders[index],
                             list()).append(data_row[key])
      list_dict_size += 1

    # Optionally pad ragged samples so each batch stacks into one ndarray.
    if self._pad_value is not None:
      feed_dict = {key: np.asarray(_pad_if_needed(item, self._pad_value))
                   for key, item in list(list_dict.items())}
    else:
      feed_dict = {key: np.asarray(item)
                   for key, item in list(list_dict.items())}
    return feed_dict
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None,
                  pad_value=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.

  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
      yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
      into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.
    pad_value: default value for dynamic padding of data samples, if provided.

  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
      `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays, a numpy `ndarray`, or a generator producing these.
    NotImplementedError: padding and shuffling data at the same time.
    NotImplementedError: padding usage with non generator data type.
  """
  with ops.name_scope(name):
    # Select dtypes, per-element shapes, and the matching feed-function class
    # from the concrete input type.
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif isinstance(data, tp.FunctionType):
      # Generator case: infer dtypes/shapes from the first yielded sample,
      # with keys sorted to fix a stable column order.
      x_first_el = six.next(data())
      x_first_keys = sorted(x_first_el.keys())
      x_first_values = [x_first_el[key] for key in x_first_keys]
      types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
      queue_shapes = [col.shape for col in x_first_values]
      get_feed_fn = _GeneratorFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))

    pad_data = pad_value is not None
    if pad_data and get_feed_fn is not _GeneratorFeedFn:
      raise NotImplementedError(
          "padding is only available with generator usage")
    if shuffle and pad_data:
      raise NotImplementedError(
          "padding and shuffling data at the same time is not implemented")

    # TODO(jamieas): TensorBoard warnings for all warnings below once available.

    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")

    # Choose the queue implementation to match the requested behavior.
    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    elif pad_data:
      min_after_dequeue = 0  # just for the summary text
      # Mark the last (padded) dimension as unknown so PaddingFIFOQueue pads
      # each dequeued batch dynamically.
      queue_shapes = list(map(
          lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
          queue_shapes))
      queue = data_flow_ops.PaddingFIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)

    enqueue_ops = []
    feed_fns = []

    # One enqueue op + feed function per reader thread, each with a distinct
    # derived seed so threads do not start at the same row.
    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size.  enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]

      enqueue_ops.append(queue.enqueue_many(placeholders))
      seed_i = None if seed is None else (i + 1) * seed
      if not pad_data:
        feed_fns.append(
            get_feed_fn(
                placeholders,
                data,
                enqueue_size,
                random_start=shuffle,
                seed=seed_i,
                num_epochs=num_epochs))
      else:
        feed_fns.append(
            get_feed_fn(
                placeholders,
                data,
                enqueue_size,
                random_start=shuffle,
                seed=seed_i,
                num_epochs=num_epochs,
                pad_value=pad_value))

    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)

    # Fraction-full summary, measured above the min_after_dequeue floor.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
| apache-2.0 |
Thomsen22/MissingMoney | Day Ahead Market - 24 Bus/optimization.py | 1 | 10171 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 13:54:04 2016
@author: Søren
"""
# Python standard modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import seaborn as sns
from itertools import cycle, islice
# Own modules
from dayahead_optclass import DayAhead
def optimization():
    """Solves the day-ahead market model and post-processes the solution.

    Builds and optimizes the `DayAhead` model, extracts zonal prices (duals),
    per-generator production, line flows, load shedding and renewable
    production into dataframes, computes revenues/start-ups per generator,
    plots production by technology, and returns all result frames.
    """
    market = DayAhead()
    market.optimize()

    times = market.data.times
    zones = market.data.zones
    generators = market.data.generators
    lines = market.data.lines
    consumption = market.data.consumption
    network = market.data.network
    df_zonalconsumption = market.data.df_zonalconsumption

    # Zonal prices, found by taking the dual of the powerbalance constraint
    df_price = pd.DataFrame(index = times, data = {z: [market.constraints.powerbalance[z,t].pi for t in times] for z in zones})
    # Generator production,
    df_genprod = pd.DataFrame(index = times, data = {g: [market.variables.gprod[g,t].x for t in times] for g in generators.index})
    # Line flow, from node -> to node
    df_lineflow = pd.DataFrame(index = times, data = {l: [market.variables.linelimit[l,t].x for t in times] for l in lines})
    # Loadshedding in the system
    df_loadshed = pd.DataFrame(index = times, data = {z: [market.variables.loadshed[z,t].x for t in times] for z in zones})
    # Wind production
    df_windprod = pd.DataFrame(index = times, data = {z: [market.variables.windprod[z,t].x for t in times] for z in zones})
    windproduction = df_windprod.sum(axis=1)
    # Solar production
    df_solarprod = pd.DataFrame(index = times, data = {z: [market.variables.solarprod[z,t].x for t in times] for z in zones})
    solarproduction = df_solarprod.sum(axis=1)

    # Total consumption (served load: demand minus shed load)
    total_consumption = consumption.set_index(np.arange(0,len(consumption)))
    total_consumption = (total_consumption.sum(axis=1)) - (df_loadshed.sum(axis=1))

    # Calculating the wind penetration level
    wind_penetration = (windproduction / total_consumption) * 100
    solar_penetration = (solarproduction / total_consumption) * 100

    df_windsolarload = pd.DataFrame({'Time': df_windprod.index, 'WindProduction[MW]': windproduction.values, 'SolarProduction[MW]': solarproduction.values,\
    'TotalConsumption[MW]': total_consumption.values, 'WindPenetration[%]': wind_penetration.values, 'SolarPenetration[%]': solar_penetration.values}).set_index('Time')

    # Assigning each zone to a generator
    zone_generator = generators[['name','country']].values.tolist()
    zone_for_gens = defaultdict(list)
    for generator, zone in zone_generator:
        zone_for_gens[generator].append(zone)

    # Creating a dictionary to contain the market prices
    # NOTE(review): DataFrame.ix is deprecated/removed in newer pandas;
    # .loc/.iloc would be required on a pandas upgrade.
    dict_price = {}
    for t in times:
        for z in np.arange(len(zones)):
            dict_price[df_price.columns[z], t] = df_price.ix[df_price.index[t], df_price.columns[z]]

    # Creating a dictionary to contain the generator production
    dict_genprod = {}
    for t in times:
        for g in np.arange(len(generators.index)):
            dict_genprod[df_genprod.columns[g], t] = df_genprod.ix[df_genprod.index[t], df_genprod.columns[g]]

    # Calculating the revenue for each generator (zonal price * production)
    dict_revenue = {}
    for t in times:
        for g in generators.index:
            for z in zone_for_gens[g]:
                dict_revenue[g, t] = dict_price[z, t] * dict_genprod[g, t]

    # Summing the revenue for all hours
    dict_revenue_total = {}
    for g in generators.index:
        dict_revenue_total[g] = sum(dict_revenue[g, t] for t in times)

    df_revenueprod = pd.DataFrame([[key,value] for key,value in dict_revenue_total.items()],columns=["Generator","Total Revenue"]).set_index('Generator')
    df_revenueprod['Total Production'] = df_genprod.sum(axis=0)

    # Catching the start-up number (production rising from zero counts as one)
    dict_startup_number = {}
    for t in times[1:]:
        for g in generators.index:
            dict_startup_number[g, t] = 0
            if(dict_genprod[g, t] > 0 and dict_genprod[g, t-1] == 0):
                dict_startup_number[g, t] = 1

    # Calculating total number of start-ups for all generators
    dict_startup_number_total = {}
    for g in generators.index:
        dict_startup_number_total[g] = sum(dict_startup_number[g,t] for t in times[1:])

    startup_number_df = pd.DataFrame([[key,value] for key,value in dict_startup_number_total.items()],columns=["Generator","Total Start-Ups"]).set_index('Generator')

    # Assigning different types of generators
    hydro_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Hydro']
    coal_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Coal']
    gas_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Gas']
    nuclear_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Nuclear']
    oil_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Oil']

    # Calculating total hourly production for all technologies
    hydroP = {}
    for g in hydro_gens:
        hydroP[g] = df_genprod[g]
    totalhydroP = {}
    for t in times:
        totalhydroP[t] = sum(hydroP[g][t] for g in hydro_gens)
    coalP = {}
    for g in coal_gens:
        coalP[g] = df_genprod[g]
    totalcoalP = {}
    for t in times:
        totalcoalP[t] = sum(coalP[g][t] for g in coal_gens)
    gasP = {}
    for g in gas_gens:
        gasP[g] = df_genprod[g]
    totalgasP = {}
    for t in times:
        totalgasP[t] = sum(gasP[g][t] for g in gas_gens)
    nuclearP = {}
    for g in nuclear_gens:
        nuclearP[g] = df_genprod[g]
    totalnuclearP = {}
    for t in times:
        totalnuclearP[t] = sum(nuclearP[g][t] for g in nuclear_gens)
    oilP = {}
    for g in oil_gens:
        oilP[g] = df_genprod[g]
    totaloilP = {}
    for t in times:
        totaloilP[t] = sum(oilP[g][t] for g in oil_gens)

    # Returning respective production into a dataframe and merges
    Oil_df = pd.DataFrame([[key,value] for key,value in totaloilP.items()],columns=["Times", "Oil Production"])#.set_index('Times')
    Nuclear_df = pd.DataFrame([[key,value] for key,value in totalnuclearP.items()],columns=["Times", "Nuclear Production"])#.set_index('Times')
    Gas_df = pd.DataFrame([[key,value] for key,value in totalgasP.items()],columns=["Times", "Gas Production"])#.set_index('Times')
    Coal_df = pd.DataFrame([[key,value] for key,value in totalcoalP.items()],columns=["Times", "Coal Production"])#.set_index('Times')
    Hydro_df = pd.DataFrame([[key,value] for key,value in totalhydroP.items()],columns=["Times", "Hydro Production"])#.set_index('Times')
    Wind_df = pd.DataFrame(df_windsolarload['WindProduction[MW]'])
    Wind_df.rename(columns={'WindProduction[MW]': 'Wind Production'}, inplace=True)
    Wind_df['Times'] = times
    Solar_df = pd.DataFrame(df_windsolarload['SolarProduction[MW]'])
    Solar_df.rename(columns={'SolarProduction[MW]': 'Solar Production'}, inplace=True)
    Solar_df['Times'] = times

    df_prodtype = pd.DataFrame(Wind_df.merge(Solar_df,on='Times').merge(Hydro_df,on='Times')\
    .merge(Nuclear_df,on='Times').merge(Coal_df,on='Times').merge(Gas_df,on='Times').merge(Oil_df,on='Times').set_index('Times'))

    # Plots the production as an area diagram
    plt.figure(1)
    my_colors = list(islice(cycle([sns.xkcd_rgb["windows blue"], sns.xkcd_rgb["yellow"], sns.xkcd_rgb["pale red"]\
    , sns.xkcd_rgb["medium green"], sns.xkcd_rgb["amber"], sns.xkcd_rgb["deep purple"],'grey']), None, len(df_prodtype)))
    ax = df_prodtype.plot(kind='area', color=my_colors)
    ax.set_ylabel('$MW$')
    ax.set_xlabel('$Hours$')
    # NOTE(review): set_axis_bgcolor was removed in matplotlib >= 2.2;
    # set_facecolor is the replacement on upgrade.
    ax.set_axis_bgcolor('whitesmoke')
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
          ncol=3, fancybox=True, shadow=True)
    # plt.savefig('productionareaplot.pdf')
    plt.show()

    return df_price, df_genprod, df_lineflow, df_loadshed, df_windsolarload, df_revenueprod, df_prodtype, network, times, generators, startup_number_df, df_zonalconsumption, df_windprod, df_solarprod
def marketoptimization():
    """Runs `optimization()` and exports per-generator revenue/cost data.

    Writes 'revenue_cost_gen.csv' and returns the generator dataframe plus
    average wind/solar penetration and total wind/solar payment figures.
    """
    df_price, df_genprod, df_lineflow, df_loadshed, df_windsolarload, df_revenueprod, df_prodtype, network, times, generators, startup_number_df, df_zonalconsumption, df_windprod, df_solarprod = optimization()

    # Average penetration over the horizon, in percent.
    wind_penetration = df_windsolarload['WindPenetration[%]'].mean(axis=0)
    solar_penetration = df_windsolarload['SolarPenetration[%]'].mean(axis=0)

    # Payments to wind/solar: zonal price times zonal production, summed.
    dict_windcost = {}
    for z in df_price.columns:
        for t in df_price.index:
            dict_windcost[z,t] = df_windprod[z][t] * df_price[z][t]
    totalwindcost = sum(dict_windcost.values())

    dict_solarcost = {}
    for z in df_price.columns:
        for t in df_price.index:
            dict_solarcost[z,t] = df_solarprod[z][t] * df_price[z][t]
    totalsolarcost = sum(dict_solarcost.values())

    # A dataframe is returned to Excel as a csv file for further work
    # NOTE: .map('{:.2f}'.format) converts the numeric columns to strings,
    # which suits the CSV export but prevents further arithmetic on them.
    gen_dataframe = df_revenueprod
    gen_dataframe['Total Revenue'] = gen_dataframe['Total Revenue'].map('{:.2f}'.format)
    gen_dataframe['Total Production'] = gen_dataframe['Total Production'].map('{:.2f}'.format)
    gen_dataframe['Number of S/U´s'] = startup_number_df['Total Start-Ups']
    gen_dataframe['Capacity'] = generators.capacity
    gen_dataframe['Marginal Cost'] = generators.lincost
    gen_dataframe['S/U cost'] = generators.cyclecost
    gen_dataframe['Fixed O&M Cost'] = generators.fixedomcost
    gen_dataframe['Var O&M Cost'] = generators.varomcost
    gen_dataframe['Levelized Capital Cost'] = generators.levcapcost
    gen_dataframe['Primary Fuel'] = generators.primaryfuel
    gen_dataframe.to_csv('revenue_cost_gen.csv')

    return gen_dataframe, wind_penetration, solar_penetration, totalwindcost, totalsolarcost
fjorba/invenio | modules/webcomment/lib/webcomment_unit_tests.py | 16 | 2189 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import unittest
from invenio.webcomment import calculate_start_date
from invenio.testutils import make_test_suite, run_test_suite
class TestCalculateStartDate(unittest.TestCase):
    """Test for calculating previous date."""

    def test_previous_year(self):
        """webcomment - calculate_start_date, values bigger than one year"""
        # assertTrue is the supported spelling; TestCase.assert_ is a
        # deprecated alias (removed from unittest in Python 3).
        self.assertTrue(int(calculate_start_date('1y')[:4]) > 2007)
        self.assertTrue(int(calculate_start_date('13m')[:4]) > 2007)
        self.assertTrue(int(calculate_start_date('55w')[:4]) > 2007)
        self.assertTrue(int(calculate_start_date('370d')[:4]) > 2007)

    def test_with_random_values(self):
        """webcomment - calculate_start_date, various random values"""
        # The reference timestamps are fixed dates in the past, so any
        # freshly computed start date must compare greater.
        self.assertTrue(calculate_start_date('1d') > '2009-07-08 14:39:39')
        self.assertTrue(calculate_start_date('2w') > '2009-07-08 14:39:39')
        self.assertTrue(calculate_start_date('2w') > '2009-06-25 14:46:31')
        self.assertTrue(calculate_start_date('2y') > '2007-07-09 14:50:43')
        self.assertTrue(calculate_start_date('6m') > '2009-01-09 14:51:10')
        self.assertTrue(calculate_start_date('77d') > '2009-04-23 14:51:31')
        self.assertTrue(calculate_start_date('20d') > '2009-06-19 14:51:55')
# Aggregate the test cases into a suite consumable by Invenio's test runner.
TEST_SUITE = make_test_suite(TestCalculateStartDate)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
amir-qayyum-khan/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_split_w_old_mongo.py | 45 | 6040 | import datetime
import random
import unittest
import uuid
from nose.plugins.attrib import attr
import mock
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mongo import DraftMongoModuleStore
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import MemoryCache
@attr('mongo')
class SplitWMongoCourseBootstrapper(unittest.TestCase):
"""
Helper for tests which need to construct split mongo & old mongo based courses to get interesting internal structure.
Override _create_course and after invoking the super() _create_course, have it call _create_item for
each xblock you want in the course.
This class ensures the db gets created, opened, and cleaned up in addition to creating the course
Defines the following attrs on self:
* user_id: a random non-registered mock user id
* split_mongo: a pointer to the split mongo instance
* draft_mongo: a pointer to the old draft instance
* split_course_key (CourseLocator): of the new course
* old_course_key: the SlashSpecifiedCourseKey for the course
"""
# Snippet of what would be in the django settings envs file
db_config = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_xmodule',
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': '',
'render_template': mock.Mock(return_value=""),
'xblock_mixins': (InheritanceMixin, XModuleMixin)
}
split_course_key = CourseLocator('test_org', 'test_course', 'runid', branch=ModuleStoreEnum.BranchName.draft)
    def setUp(self):
        """Create fresh split and old-mongo stores against a throwaway collection."""
        # Randomized collection name keeps repeated/concurrent test runs from
        # colliding in the shared mongo instance.
        self.db_config['collection'] = 'modulestore{0}'.format(uuid.uuid4().hex[:5])
        self.user_id = random.getrandbits(32)
        super(SplitWMongoCourseBootstrapper, self).setUp()
        self.split_mongo = SplitMongoModuleStore(
            None,
            self.db_config,
            **self.modulestore_options
        )
        self.addCleanup(self.split_mongo._drop_database)  # pylint: disable=protected-access
        self.draft_mongo = DraftMongoModuleStore(
            None, self.db_config, branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
            metadata_inheritance_cache_subsystem=MemoryCache(),
            **self.modulestore_options
        )
        self.addCleanup(self.draft_mongo._drop_database)  # pylint: disable=protected-access
        self.old_course_key = None
        self.runtime = None
        # Subclasses populate content via their _create_course override.
        self._create_course()
def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):
"""
Create the item of the given category and block id in split and old mongo, add it to the optional
parent. The parent category is only needed because old mongo requires it for the id.
Note: if draft = False, it will create the draft and then publish it; so, it will overwrite any
existing draft for both the new item and the parent
"""
location = self.old_course_key.make_usage_key(category, name)
self.draft_mongo.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id,
definition_data=data,
metadata=metadata,
runtime=self.runtime
)
if not draft:
self.draft_mongo.publish(location, self.user_id)
if isinstance(data, basestring):
fields = {'data': data}
else:
fields = data.copy()
fields.update(metadata)
if parent_name:
# add child to parent in mongo
parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)
parent = self.draft_mongo.get_item(parent_location)
parent.children.append(location)
self.draft_mongo.update_item(parent, self.user_id)
if not draft:
self.draft_mongo.publish(parent_location, self.user_id)
# create child for split
if split:
self.split_mongo.create_child(
self.user_id,
BlockUsageLocator(
course_key=self.split_course_key,
block_type=parent_category,
block_id=parent_name
),
category,
block_id=name,
fields=fields
)
else:
if split:
self.split_mongo.create_item(
self.user_id,
self.split_course_key,
category,
block_id=name,
fields=fields
)
def _create_course(self, split=True):
"""
* some detached items
* some attached children
* some orphans
"""
metadata = {
'start': datetime.datetime(2000, 3, 13, 4),
'display_name': 'Migration test course',
}
data = {
'wiki_slug': 'test_course_slug'
}
fields = metadata.copy()
fields.update(data)
if split:
# split requires the course to be created separately from creating items
self.split_mongo.create_course(
self.split_course_key.org, self.split_course_key.course, self.split_course_key.run, self.user_id, fields=fields, root_block_id='runid'
)
old_course = self.draft_mongo.create_course(self.split_course_key.org, 'test_course', 'runid', self.user_id, fields=fields)
self.old_course_key = old_course.id
self.runtime = old_course.runtime
| agpl-3.0 |
StefanRijnhart/odoo | addons/project_issue/report/project_issue_report.py | 6 | 4537 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
from openerp.addons.crm import crm
class project_issue_report(osv.osv):
    """SQL-view-backed analysis model for project issues.

    ``_auto = False`` keeps the ORM from creating a table; ``init()``
    (re)builds the ``project_issue_report`` view that feeds the columns
    below, so all records are read-only and derived from ``project_issue``.
    """
    _name = "project.issue.report"
    _auto = False
    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sale Team', readonly=True),
        # BUG FIX: 'company_id' was declared twice in this dict; the later,
        # non-readonly declaration silently won (dict last-wins), so only
        # that effective definition is kept here.
        'company_id': fields.many2one('res.company', 'Company'),
        'opening_date': fields.datetime('Date of Opening', readonly=True),
        'create_date': fields.datetime('Create Date', readonly=True),
        'date_closed': fields.datetime('Date of Closing', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'stage_id': fields.many2one('project.task.type', 'Stage'),
        'nbr': fields.integer('# of Issues', readonly=True),  # TDE FIXME master: rename into nbr_issues
        'working_hours_open': fields.float('Avg. Working Hours to Open', readonly=True, group_operator="avg"),
        'working_hours_close': fields.float('Avg. Working Hours to Close', readonly=True, group_operator="avg"),
        'delay_open': fields.float('Avg. Delay to Open', digits=(16,2), readonly=True, group_operator="avg",
                                   help="Number of Days to open the project issue."),
        'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
                                    help="Number of Days to close the project issue"),
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
        'project_id': fields.many2one('project.project', 'Project', readonly=True),
        'version_id': fields.many2one('project.issue.version', 'Version'),
        'user_id': fields.many2one('res.users', 'Assigned to', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Contact'),
        'channel': fields.char('Channel', readonly=True, help="Communication Channel."),
        'task_id': fields.many2one('project.task', 'Task'),
        'email': fields.integer('# Emails', size=128, readonly=True),
    }

    def init(self, cr):
        """Drop and (re)create the SQL view backing this model.

        :param cr: database cursor
        """
        tools.drop_view_if_exists(cr, 'project_issue_report')
        cr.execute("""
            CREATE OR REPLACE VIEW project_issue_report AS (
                SELECT
                    c.id as id,
                    c.date_open as opening_date,
                    c.create_date as create_date,
                    c.date_last_stage_update as date_last_stage_update,
                    c.user_id,
                    c.working_hours_open,
                    c.working_hours_close,
                    c.section_id,
                    c.stage_id,
                    date(c.date_closed) as date_closed,
                    c.company_id as company_id,
                    c.priority as priority,
                    c.project_id as project_id,
                    c.version_id as version_id,
                    1 as nbr,
                    c.partner_id,
                    c.channel,
                    c.task_id,
                    c.day_open as delay_open,
                    c.day_close as delay_close,
                    (SELECT count(id) FROM mail_message WHERE model='project.issue' AND res_id=c.id) AS email

            FROM
                project_issue c
            LEFT JOIN project_task t on c.task_id = t.id
            WHERE c.active= 'true'
            )""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/openerp/conf/deprecation.py | 380 | 2602 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Regroup variables for deprecated features.
To keep the OpenERP server backward compatible with older modules, some
additional code is needed throughout the core library. This module keeps
track of those specific measures by providing variables that can be unset
by the user to check if her code is future proof.
In a perfect world, all these variables are set to False, the corresponding
code removed, and thus these variables made unnecessary.
"""
# If True, the Python modules inside the openerp namespace are made available
# without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the
# same module.
# Introduced around 2011.02.
# Change to False around 2013.02.
open_openerp_namespace = False
# If True, openerp.netsvc.LocalService() can be used to lookup reports or to
# access openerp.workflow.
# Introduced around 2013.03.
# Among the related code:
# - The openerp.netsvc.LocalService() function.
# - The openerp.report.interface.report_int._reports dictionary.
# - The register attribute in openerp.report.interface.report_int (and in its
# - auto column in ir.actions.report.xml.
# inheriting classes).
allow_local_service = True
# Applies for the register attribute in openerp.report.interface.report_int.
# See comments for allow_local_service above.
# Introduced around 2013.03.
allow_report_int_registration = True
# If True, the functions in openerp.pooler can be used.
# Introduced around 2013.03 (actually they are deprecated since much longer
# but no warning was dispayed in the logs).
openerp_pooler = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/test/test_anydbm.py | 99 | 2215 | #! /usr/bin/env python
"""Test script for the anydbm module
based on testdumbdbm.py
"""
import os
import unittest
import anydbm
import glob
from test import test_support
_fname = test_support.TESTFN
def _delete_files():
    # The underlying dbm backend picks its own on-disk filename(s), so glob
    # for everything starting with the test filename and remove each match,
    # ignoring files that disappear or cannot be unlinked.
    for leftover in glob.glob(_fname + "*"):
        try:
            os.unlink(leftover)
        except OSError:
            pass
class AnyDBMTestCase(unittest.TestCase):
    """Exercise create/read/modify behaviour of anydbm databases."""

    # Reference data every test populates and verifies against.
    _dict = {'0': '',
             'a': 'Python:',
             'b': 'Programming',
             'c': 'the',
             'd': 'way',
             'f': 'Guido',
             'g': 'intended'
             }

    def __init__(self, *args):
        unittest.TestCase.__init__(self, *args)

    def setUp(self):
        _delete_files()

    def tearDown(self):
        _delete_files()

    def init_db(self):
        """Create a fresh database populated with the reference data."""
        db = anydbm.open(_fname, 'n')
        for key, value in self._dict.items():
            db[key] = value
        db.close()

    def keys_helper(self, db):
        """Assert the database keys equal the reference keys; return them."""
        actual = sorted(db.keys())
        expected = sorted(self._dict.keys())
        self.assertEqual(actual, expected)
        return actual

    def read_helper(self, db):
        """Assert every reference entry is present with the right value."""
        self.keys_helper(db)
        for key, value in self._dict.items():
            self.assertEqual(value, db[key])

    def test_anydbm_creation(self):
        db = anydbm.open(_fname, 'c')
        self.assertEqual(db.keys(), [])
        for key, value in self._dict.items():
            db[key] = value
        self.read_helper(db)
        db.close()

    def test_anydbm_modification(self):
        self.init_db()
        db = anydbm.open(_fname, 'c')
        # Overwrite one entry and keep the reference dict in sync.
        self._dict['g'] = db['g'] = "indented"
        self.read_helper(db)
        db.close()

    def test_anydbm_read(self):
        self.init_db()
        db = anydbm.open(_fname, 'r')
        self.read_helper(db)
        db.close()

    def test_anydbm_keys(self):
        self.init_db()
        db = anydbm.open(_fname, 'r')
        self.keys_helper(db)
        db.close()
def test_main():
    """Run the AnyDBMTestCase suite, removing database files afterwards."""
    try:
        test_support.run_unittest(AnyDBMTestCase)
    finally:
        # Clean up on-disk dbm files even if the test run raised.
        _delete_files()

if __name__ == "__main__":
    test_main()
| apache-2.0 |
cuboxi/android_external_chromium_org | third_party/tlslite/tlslite/integration/IntegrationHelper.py | 121 | 1788 |
class IntegrationHelper:
    """Collects client-side TLS authentication material and server checks.

    Exactly one authentication style may be supplied:

      * SRP:          username + password
      * shared key:   username + sharedKey
      * certificate:  certChain + privateKey
      * none:         no credential arguments at all

    Any other combination raises ValueError. A Checker is always built from
    the cryptoID/x509 arguments to authenticate the server.
    """

    def __init__(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings = None):

        self.username = None
        self.password = None
        self.sharedKey = None
        self.certChain = None
        self.privateKey = None
        self.checker = None

        #SRP Authentication
        if username and password and not \
                (sharedKey or certChain or privateKey):
            self.username = username
            self.password = password

        #Shared Key Authentication
        elif username and sharedKey and not \
                (password or certChain or privateKey):
            self.username = username
            self.sharedKey = sharedKey

        #Certificate Chain Authentication
        elif certChain and privateKey and not \
                (username or password or sharedKey):
            self.certChain = certChain
            self.privateKey = privateKey

        #No Authentication
        elif not password and not username and not \
                sharedKey and not certChain and not privateKey:
            pass

        else:
            raise ValueError("Bad parameters")

        #Authenticate the server based on its cryptoID or fingerprint
        if sharedKey and (cryptoID or protocol or x509Fingerprint):
            # BUG FIX: the original implicit string concatenation was missing
            # a space, producing "...forms ofauthentication".
            raise ValueError("Can't use shared keys with other forms of "
                             "authentication")

        # NOTE(review): Checker is constructed unconditionally; it must be
        # imported by the enclosing module (not visible in this chunk).
        self.checker = Checker(cryptoID, protocol, x509Fingerprint,
                               x509TrustList, x509CommonName)
        self.settings = settings
yongshengwang/builthue | desktop/core/ext-py/Django-1.4.5/django/contrib/gis/tests/geoapp/test_regress.py | 90 | 3402 | from __future__ import absolute_import
from datetime import datetime
from django.contrib.gis.tests.utils import no_mysql, no_spatialite
from django.contrib.gis.shortcuts import render_to_kmz
from django.db.models import Count
from django.test import TestCase
from .models import City, PennsylvaniaCity, State, Truth
class GeoRegressionTests(TestCase):
    """Regression tests pinning behaviour of previously fixed GeoDjango tickets."""

    def test01_update(self):
        "Testing GeoQuerySet.update(). See #10411."
        pnt = City.objects.get(name='Pueblo').point
        bak = pnt.clone()
        pnt.y += 0.005
        pnt.x += 0.005

        # Update the geometry, verify it round-trips through the DB, then
        # restore the original point from the backup clone.
        City.objects.filter(name='Pueblo').update(point=pnt)
        self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
        City.objects.filter(name='Pueblo').update(point=bak)
        self.assertEqual(bak, City.objects.get(name='Pueblo').point)

    def test02_kmz(self):
        "Testing `render_to_kmz` with non-ASCII data. See #11624."
        # Python 2 idiom: decode bytes to a unicode place name. The test only
        # checks that rendering non-ASCII text does not raise.
        name = '\xc3\x85land Islands'.decode('iso-8859-1')
        places = [{'name' : name,
                   'description' : name,
                   'kml' : '<Point><coordinates>5.0,23.0</coordinates></Point>'
                   }]
        kmz = render_to_kmz('gis/kml/placemarks.kml', {'places' : places})

    @no_spatialite
    @no_mysql
    def test03_extent(self):
        "Testing `extent` on a table with a single point. See #11827."
        pnt = City.objects.get(name='Pueblo').point
        # For a single point the extent collapses to (x, y, x, y).
        ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
        extent = City.objects.filter(name='Pueblo').extent()
        for ref_val, val in zip(ref_ext, extent):
            self.assertAlmostEqual(ref_val, val, 4)

    def test04_unicode_date(self):
        "Testing dates are converted properly, even on SpatiaLite. See #16408."
        founded = datetime(1857, 5, 23)
        mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
                                                    founded=founded)
        self.assertEqual(founded, PennsylvaniaCity.objects.dates('founded', 'day')[0])

    def test05_empty_count(self):
        "Testing that PostGISAdapter.__eq__ does check empty strings. See #13670."
        # contrived example, but need a geo lookup paired with an id__in lookup
        pueblo = City.objects.get(name='Pueblo')
        state = State.objects.filter(poly__contains=pueblo.point)
        cities_within_state = City.objects.filter(id__in=state)

        # .count() should not throw TypeError in __eq__
        self.assertEqual(cities_within_state.count(), 1)

    def test06_defer_or_only_with_annotate(self):
        "Regression for #16409. Make sure defer() and only() work with annotate()"
        self.assertIsInstance(list(City.objects.annotate(Count('point')).defer('name')), list)
        self.assertIsInstance(list(City.objects.annotate(Count('point')).only('name')), list)

    def test07_boolean_conversion(self):
        "Testing Boolean value conversion with the spatial backend, see #15169."
        t1 = Truth.objects.create(val=True)
        t2 = Truth.objects.create(val=False)
        val1 = Truth.objects.get(pk=1).val
        val2 = Truth.objects.get(pk=2).val
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(val1, bool)
        self.assertIsInstance(val2, bool)
        # verify values
        self.assertEqual(val1, True)
        self.assertEqual(val2, False)
| apache-2.0 |
CollabQ/CollabQ | vendor/django/core/serializers/json.py | 26 | 2095 | """
Serialize data to/from JSON
"""
import datetime
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # Python 2.3 fallback
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False

    def end_serialization(self):
        # Strip options that are meaningful only to the serializer itself
        # before forwarding the remainder to simplejson.
        for private_opt in ('stream', 'fields'):
            self.options.pop(private_opt, None)
        simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)

    def getvalue(self):
        # Only in-memory streams (e.g. StringIO) expose getvalue();
        # file-like streams without it yield None implicitly.
        getvalue = getattr(self.stream, 'getvalue', None)
        if callable(getvalue):
            return getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.

    Accepts either a file-like object or a raw JSON string; yields the
    deserialized objects lazily (this stays a generator function so no
    work happens until iteration begins).
    """
    if isinstance(stream_or_string, basestring):
        source = StringIO(stream_or_string)
    else:
        source = stream_or_string
    for deserialized in PythonDeserializer(simplejson.load(source)):
        yield deserialized
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"

    def default(self, o):
        # datetime must be tested before date: datetime subclasses date.
        if isinstance(o, datetime.datetime):
            safe_dt = datetime_safe.new_datetime(o)
            return safe_dt.strftime(self.DATE_FORMAT + " " + self.TIME_FORMAT)
        if isinstance(o, datetime.date):
            return datetime_safe.new_date(o).strftime(self.DATE_FORMAT)
        if isinstance(o, datetime.time):
            return o.strftime(self.TIME_FORMAT)
        if isinstance(o, decimal.Decimal):
            # Serialize as a string to avoid float precision loss.
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)

# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| apache-2.0 |
belmiromoreira/nova | nova/compute/instance_actions.py | 74 | 1447 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible actions on an instance.
Actions should probably match a user intention at the API level. Because they
can be user visible that should help to avoid confusion. For that reason they
tend to maintain the casing sent to the API.
Maintaining a list of actions here should protect against inconsistencies when
they are used.
"""
CREATE = 'create'
DELETE = 'delete'
EVACUATE = 'evacuate'
RESTORE = 'restore'
STOP = 'stop'
START = 'start'
REBOOT = 'reboot'
REBUILD = 'rebuild'
REVERT_RESIZE = 'revertResize'
CONFIRM_RESIZE = 'confirmResize'
RESIZE = 'resize'
MIGRATE = 'migrate'
PAUSE = 'pause'
UNPAUSE = 'unpause'
SUSPEND = 'suspend'
RESUME = 'resume'
RESCUE = 'rescue'
UNRESCUE = 'unrescue'
CHANGE_PASSWORD = 'changePassword'
SHELVE = 'shelve'
UNSHELVE = 'unshelve'
LIVE_MIGRATION = 'live-migration'
| apache-2.0 |
entomb/CouchPotatoServer | couchpotato/core/downloaders/synology/main.py | 5 | 6224 | from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
import json
import requests
import traceback
log = CPLog(__name__)
class Synology(Downloader):
    """CouchPotato downloader plugin for Synology Download Station."""

    protocol = ['nzb', 'torrent', 'torrent_magnet']
    status_support = False

    def download(self, data = None, media = None, filedata = None):
        """Send a release (magnet URI, torrent file or NZB) to Download Station.

        :param data: release info dict; must contain 'name' and 'protocol',
                     plus 'url' for magnets.
        :param media: media info dict (unused here, kept for the interface).
        :param filedata: raw .torrent/.nzb contents for file-based protocols.
        :return: download-id wrapper on success, False otherwise.
        """
        if not media: media = {}
        if not data: data = {}

        response = False
        # BUG FIX: this is an informational message, not an error; the
        # original logged it at error level.
        log.info('Sending "%s" (%s) to Synology.', (data['name'], data['protocol']))

        # Load host from config and split out port.
        host = cleanHost(self.conf('host'), protocol = False).split(':')
        if not isInt(host[1]):
            log.error('Config properties are not filled in correctly, port is missing.')
            return False

        try:
            # Send request to Synology
            srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
            if data['protocol'] == 'torrent_magnet':
                log.info('Adding torrent URL %s', data['url'])
                response = srpc.create_task(url = data['url'])
            elif data['protocol'] in ['nzb', 'torrent']:
                log.info('Adding %s' % data['protocol'])
                if not filedata:
                    log.error('No %s data found', data['protocol'])
                else:
                    filename = data['name'] + '.' + data['protocol']
                    response = srpc.create_task(filename = filename, filedata = filedata)
        except:
            # Bare except kept on purpose: any failure (bad dict keys,
            # network errors) must not crash the downloader loop.
            log.error('Exception while adding torrent: %s', traceback.format_exc())

        # BUG FIX: the original returned from a ``finally`` block, a construct
        # that can mask exceptions; a plain return yields the same result.
        return self.downloadReturnId('') if response else False

    def test(self):
        """Check connectivity/credentials against the configured host."""
        host = cleanHost(self.conf('host'), protocol = False).split(':')
        try:
            srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
            test_result = srpc.test()
        except:
            return False

        return test_result

    def getEnabledProtocol(self):
        """Restrict advertised protocols to what the user configured."""
        if self.conf('use_for') == 'both':
            return super(Synology, self).getEnabledProtocol()
        elif self.conf('use_for') == 'torrent':
            return ['torrent', 'torrent_magnet']
        else:
            return ['nzb']

    def isEnabled(self, manual = False, data = None):
        """Enabled only when base checks pass and 'use_for' covers the release protocol."""
        if not data: data = {}

        for_protocol = ['both']
        if data and 'torrent' in data.get('protocol'):
            for_protocol.append('torrent')
        elif data:
            for_protocol.append(data.get('protocol'))

        return super(Synology, self).isEnabled(manual, data) and \
            ((self.conf('use_for') in for_protocol))
class SynologyRPC(object):
    """SynologyRPC lite library.

    Minimal client for the Synology web API: logs in via ``auth.cgi`` and
    creates DownloadStation tasks via ``task.cgi``.
    """

    def __init__(self, host = 'localhost', port = 5000, username = None, password = None):
        # Only remembers endpoint URLs and credentials; no network I/O here.
        super(SynologyRPC, self).__init__()
        self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
        self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
        self.username = username
        self.password = password
        self.session_name = 'DownloadStation'

    def _login(self):
        """Open a DownloadStation session; return True on success.

        On success the session id is stored in ``self.sid`` for later calls.
        NOTE(review): ``self.sid`` is only set here, so ``_logout`` before a
        successful ``_login`` would raise AttributeError.
        """
        if self.username and self.password:
            args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2,
                    'method': 'login', 'session': self.session_name, 'format': 'sid'}
            response = self._req(self.auth_url, args)
            if response['success']:
                self.sid = response['data']['sid']
                log.debug('sid=%s', self.sid)
            else:
                log.error('Couldn\'t login to Synology, %s', response)
            return response['success']
        else:
            log.error('User or password missing, not using authentication.')
            return False

    def _logout(self):
        """Close the session previously opened by ``_login``."""
        args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid}
        return self._req(self.auth_url, args)

    def _req(self, url, args, files = None):
        """POST ``args`` (and optional ``files``) to ``url``; return decoded JSON.

        Falls back to ``{'success': False}`` on any handled failure.
        NOTE(review): the ``return`` inside ``finally`` also swallows any
        exception a handler itself might raise — consider returning normally.
        """
        response = {'success': False}
        try:
            req = requests.post(url, data = args, files = files)
            req.raise_for_status()
            response = json.loads(req.text)
            if response['success']:
                log.info('Synology action successfull')
            return response
        except requests.ConnectionError as err:
            log.error('Synology connection error, check your config %s', err)
        except requests.HTTPError as err:
            log.error('SynologyRPC HTTPError: %s', err)
        except Exception as err:
            log.error('Exception: %s', err)
        finally:
            return response

    def create_task(self, url = None, filename = None, filedata = None):
        """ Creates new download task in Synology DownloadStation. Either specify
        url or pair (filename, filedata).

        Returns True if task was created, False otherwise
        """
        result = False
        # login
        if self._login():
            args = {'api': 'SYNO.DownloadStation.Task',
                    'version': '1',
                    'method': 'create',
                    '_sid': self.sid}
            if url:
                log.info('Login success, adding torrent URI')
                args['uri'] = url
                response = self._req(self.download_url, args = args)
                log.info('Response: %s', response)
                result = response['success']
            elif filename and filedata:
                log.info('Login success, adding torrent')
                files = {'file': (filename, filedata)}
                response = self._req(self.download_url, args = args, files = files)
                log.info('Response: %s', response)
                result = response['success']
            else:
                log.error('Invalid use of SynologyRPC.create_task: either url or filename+filedata must be specified')
            self._logout()

        return result

    def test(self):
        """Return True when login with the configured credentials works."""
        return bool(self._login())
| gpl-3.0 |
photoninger/ansible | lib/ansible/modules/crypto/openssl_privatekey.py | 28 | 10517 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_privatekey
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Generate OpenSSL private keys.
description:
- "This module allows one to (re)generate OpenSSL private keys. It uses
the pyOpenSSL python library to interact with openssl. One can generate
either RSA or DSA private keys. Keys are generated in PEM format."
requirements:
- "python-pyOpenSSL"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
size:
required: false
default: 4096
description:
- Size (in bits) of the TLS/SSL key to generate
type:
required: false
default: "RSA"
choices: [ RSA, DSA ]
description:
- The algorithm used to generate the TLS/SSL private key
force:
required: false
default: False
choices: [ True, False ]
description:
- Should the key be regenerated even it it already exists
path:
required: true
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
passphrase:
required: false
description:
- The passphrase for the private key.
version_added: "2.4"
cipher:
required: false
description:
- The cipher to encrypt the private key. (cipher can be found by running `openssl list-cipher-algorithms`)
version_added: "2.4"
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
# and a passphrase
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
# Generate an OpenSSL private key with a different size (2048 bits)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
# Force regenerate an OpenSSL private key if it already exists
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: True
# Generate an OpenSSL private key with a different algorithm (DSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = '''
size:
description: Size (in bits) of the TLS/SSL private key
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key
returned: changed or success
type: string
sample: RSA
filename:
description: Path to the generated TLS/SSL private key file
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description: The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
'''
import os
import traceback
try:
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
class PrivateKeyError(crypto_utils.OpenSSLObjectError):
    """Raised for any failure while generating or writing the private key."""
    pass
class PrivateKey(crypto_utils.OpenSSLObject):
    """Stateful handler for one on-disk TLS/SSL private key.

    Wraps generation, permission handling, idempotence checking and result
    serialization for the openssl_privatekey module.
    """

    def __init__(self, module):
        super(PrivateKey, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.size = module.params['size']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.privatekey = None
        self.fingerprint = {}

        # Default to 0600: a private key must not be group/world readable.
        self.mode = module.params.get('mode', None)
        if self.mode is None:
            self.mode = 0o600

        self.type = crypto.TYPE_RSA
        if module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA

    def generate(self, module):
        """Generate a keypair."""

        if not self.check(module, perms_required=False) or self.force:
            self.privatekey = crypto.PKey()

            try:
                self.privatekey.generate_key(self.type, self.size)
            except (TypeError, ValueError) as exc:
                raise PrivateKeyError(exc)

            try:
                # Create the file and chmod it BEFORE writing any key
                # material so the key is never exposed with loose perms.
                privatekey_file = os.open(self.path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
                os.close(privatekey_file)
                if isinstance(self.mode, string_types):
                    try:
                        # Octal string like "0600".
                        self.mode = int(self.mode, 8)
                    except ValueError as e:
                        try:
                            # Symbolic mode like "u=rw,g=,o=".
                            st = os.lstat(self.path)
                            self.mode = AnsibleModule._symbolic_mode_to_octal(st, self.mode)
                        except ValueError as e:
                            module.fail_json(msg="%s" % to_native(e), exception=traceback.format_exc())
                os.chmod(self.path, self.mode)
                privatekey_file = os.open(self.path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, self.mode)
                if self.cipher and self.passphrase:
                    os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                                                     self.cipher, to_bytes(self.passphrase)))
                else:
                    os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey))
                os.close(privatekey_file)
                self.changed = True
            except IOError as exc:
                # Do not leave a partially written key behind.
                self.remove()
                raise PrivateKeyError(exc)

        self.fingerprint = crypto_utils.get_fingerprint(self.path, self.passphrase)
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""

        state_and_perms = super(PrivateKey, self).check(module, perms_required)

        def _check_size(privatekey):
            # Key length must match the requested bit size.
            return self.size == privatekey.bits()

        def _check_type(privatekey):
            # Algorithm (RSA/DSA) must match.
            return self.type == privatekey.type()

        def _check_passphrase():
            # The key must be loadable with the supplied passphrase.
            try:
                crypto_utils.load_privatekey(self.path, self.passphrase)
                return True
            except crypto.Error:
                return False

        if not state_and_perms or not _check_passphrase():
            return False

        privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)

        return _check_size(privatekey) and _check_type(privatekey)

    def dump(self):
        """Serialize the object into a dictionary."""

        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }

        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'

        return result
def main():
    """Module entry point: parse arguments, then create or remove the key."""

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            size=dict(default=4096, type='int'),
            type=dict(default='RSA', choices=['RSA', 'DSA'], type='str'),
            force=dict(default=False, type='bool'),
            path=dict(required=True, type='path'),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[['cipher', 'passphrase']],
    )

    if not pyopenssl_found:
        module.fail_json(msg='the python pyOpenSSL module is required')

    # The target directory must already exist; this module will not create it.
    base_dir = os.path.dirname(module.params['path'])
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )

    private_key = PrivateKey(module)
    if private_key.state == 'present':

        # In check mode only report whether a change would happen.
        if module.check_mode:
            result = private_key.dump()
            result['changed'] = module.params['force'] or not private_key.check(module)
            module.exit_json(**result)

        try:
            private_key.generate(module)
        except PrivateKeyError as exc:
            module.fail_json(msg=to_native(exc))
    else:

        if module.check_mode:
            result = private_key.dump()
            result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)

        try:
            private_key.remove()
        except PrivateKeyError as exc:
            module.fail_json(msg=to_native(exc))

    result = private_key.dump()

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
kmmartins/xbmc | addons/service.xbmc.versioncheck/service.py | 62 | 4015 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import platform
import xbmc
import lib.common
from lib.common import log, dialog_yesno
from lib.common import upgrade_message as _upgrademessage
from lib.common import upgrade_message2 as _upgrademessage2
# Re-export addon metadata resolved once in lib.common so the rest of this
# module can refer to it without touching xbmcaddon directly.
__addon__ = lib.common.__addon__
__addonversion__ = lib.common.__addonversion__
__addonname__ = lib.common.__addonname__
__addonpath__ = lib.common.__addonpath__
__icon__ = lib.common.__icon__

# Module-level default read by _versionchecklinux when prompting about an
# available upgrade; it is never set to True at module level.
oldversion = False
class Main:
    """
    Service entry point: waits briefly after Kodi starts, then either checks
    apt for a newer 'kodi' package (on Linux with the "upgrade_apt" setting
    enabled) or compares the installed version against the published version
    list and shows an upgrade notice when we are out of date.
    """

    def __init__(self):
        # Give Kodi a few seconds to finish starting up before doing work.
        # (Removed unused locals `linux` and the dead initial `packages = []`.)
        xbmc.sleep(5000)
        if xbmc.getCondVisibility('System.Platform.Linux') and __addon__.getSetting("upgrade_apt") == 'true':
            packages = ['kodi']
            _versionchecklinux(packages)
        else:
            oldversion, version_installed, version_available, version_stable = _versioncheck()
            if oldversion:
                _upgrademessage2(version_installed, version_available, version_stable, oldversion, False)
def _versioncheck():
    """
    Compare the installed Kodi version against the published version file.

    Returns the (oldversion, version_installed, version_available,
    version_stable) tuple produced by lib.versions.compare_version.
    """
    from lib.jsoninterface import get_installedversion, get_versionfilelist
    from lib.versions import compare_version
    # Fetch the published version list and the locally installed version,
    # then let compare_version decide whether we are out of date.
    available_versions = get_versionfilelist()
    installed = get_installedversion()
    return compare_version(installed, available_versions)
def _versionchecklinux(packages):
    """
    Check for (and optionally install) package upgrades on apt-based distros.

    packages -- list of package names; only the first entry is checked.
    """
    if platform.dist()[0].lower() in ['ubuntu', 'debian', 'linuxmint']:
        handler = False
        result = False
        try:
            # Prefer aptdaemon: it handles privilege escalation itself.
            from lib.aptdeamonhandler import AptdeamonHandler
            handler = AptdeamonHandler()
        except Exception:
            # Fall back to the shell-based apt handler (was a bare `except:`;
            # narrowed so SystemExit/KeyboardInterrupt are not swallowed).
            # Since the shell handler needs the user's password, first ask
            # whether to check for a new version at all, and offer to disable
            # the addon otherwise.
            from lib.shellhandlerapt import ShellHandlerApt
            sudo = True
            handler = ShellHandlerApt(sudo)
            if dialog_yesno(32015):
                pass
            elif dialog_yesno(32009, 32010):
                log("disabling addon by user request")
                __addon__.setSetting("versioncheck_enable", 'false')
                return
        if handler:
            if handler.check_upgrade_available(packages[0]):
                if _upgrademessage(32012, oldversion, True):
                    if __addon__.getSetting("upgrade_system") == "false":
                        result = handler.upgrade_package(packages[0])
                    else:
                        result = handler.upgrade_system()
                    if result:
                        from lib.common import message_upgrade_success, message_restart
                        message_upgrade_success()
                        message_restart()
                    else:
                        log("Error during upgrade")
        else:
            log("Error: no handler found")
    else:
        log("Unsupported platform %s" % platform.dist()[0])
        # BUG FIX: `sys` was used here without ever being imported in this
        # module, which raised NameError instead of exiting cleanly.
        import sys
        sys.exit(0)
# Service entry point: log startup and run the version check.
if (__name__ == "__main__"):
    log('Version %s started' % __addonversion__)
    Main()
| gpl-2.0 |
Edraak/edraak-platform | openedx/core/djangoapps/ace_common/tests/mixins.py | 4 | 3300 | # pylint: disable=missing-docstring
from urlparse import parse_qs, urlparse
import uuid
from django.http import HttpRequest
from mock import patch
from edx_ace import Message, Recipient
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from student.tests.factories import UserFactory
class QueryStringAssertionMixin(object):
    """Test mixin providing assertion helpers for URLs and query strings."""

    def assert_query_string_equal(self, expected_qs, actual_qs):
        """
        Compares two query strings to see if they are equivalent. Note that order of parameters is not significant.

        Args:
            expected_qs (str): The expected query string.
            actual_qs (str): The actual query string.

        Raises:
            AssertionError: If the two query strings are not equal.
        """
        # parse_qs maps each key to a list of values, so comparing the parsed
        # dicts makes the comparison order-insensitive.
        self.assertDictEqual(parse_qs(expected_qs), parse_qs(actual_qs))

    def assert_url_components_equal(self, url, **kwargs):
        """
        Assert that the provided URL has the expected components with the expected values.

        Args:
            url (str): The URL to parse and make assertions about.
            **kwargs: The expected component values. For example: scheme='https' would assert that the URL scheme was
                https.

        Raises:
            AssertionError: If any of the expected components do not match.
        """
        parsed_url = urlparse(url)
        for expected_component, expected_value in kwargs.items():
            if expected_component == 'query':
                # Query strings are compared order-insensitively.
                self.assert_query_string_equal(expected_value, parsed_url.query)
            else:
                self.assertEqual(expected_value, getattr(parsed_url, expected_component))

    def assert_query_string_parameters_equal(self, url, **kwargs):
        """
        Assert that the provided URL has query string parameters that match the kwargs.

        Args:
            url (str): The URL to parse and make assertions about.
            **kwargs: The expected query string parameter values. For example: foo='bar' would assert that foo=bar
                appeared in the query string.

        Raises:
            AssertionError: If any of the expected parameter values do not match.
        """
        parsed_url = urlparse(url)
        parsed_qs = parse_qs(parsed_url.query)
        for expected_key, expected_value in kwargs.items():
            # Values are coerced to str because parse_qs always yields lists
            # of strings.
            self.assertEqual(parsed_qs[expected_key], [str(expected_value)])
class EmailTemplateTagMixin(object):
    """Test mixin that stubs out the current request and builds a test Message."""

    def setUp(self):
        super(EmailTemplateTagMixin, self).setUp()

        # Replace get_current_request in the ace templatetags for the
        # duration of the test.
        request_patcher = patch('openedx.core.djangoapps.ace_common.templatetags.ace.get_current_request')
        self.mock_get_current_request = request_patcher.start()
        self.addCleanup(request_patcher.stop)

        # Build a fake request carrying a real user and site, and have the
        # patched accessor return it.
        fake_request = HttpRequest()
        fake_request.user = UserFactory.create()
        fake_request.site = SiteFactory.create()
        fake_request.site.domain = 'example.com'
        self.fake_request = fake_request
        self.mock_get_current_request.return_value = fake_request

        # A minimal ACE message, plus the template context that carries it.
        self.message = Message(
            app_label='test_app_label',
            name='test_name',
            recipient=Recipient(username='test_user'),
            context={},
            send_uuid=uuid.uuid4(),
        )
        self.context = {
            'message': self.message
        }
| agpl-3.0 |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/pip/_vendor/requests/packages/charade/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Base class for all charset probers.

    Tracks the detection state machine and provides the byte-filtering
    helpers shared by the concrete probers.
    """

    def __init__(self):
        pass

    def reset(self):
        # Start (or restart) a detection run.
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # Concrete probers report the charset they detect; the base knows none.
        return None

    def feed(self, aBuf):
        # Concrete probers consume input here; the base class ignores it.
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        # The base prober is never confident about anything.
        return 0.0

    def filter_high_bit_only(self, aBuf):
        """Collapse each run of ASCII bytes to a single space, keeping only
        bytes with the high bit set."""
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        """Collapse each run of English letters to a single space."""
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO: not implemented yet; currently a pass-through.
        return aBuf
| gpl-2.0 |
javiplx/cobbler-devel | cobbler/commands.py | 2 | 17678 | """
Command line handling for Cobbler.
Copyright 2008, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import optparse
from cexceptions import *
from utils import _
import sys
import api
# printf-style row template for command listings: padded name, then help text.
HELP_FORMAT = "%-20s%s"
#=============================================================
class FunctionLoader:
"""
The F'n Loader controls processing of cobbler commands.
"""
def __init__(self, api):
"""
When constructed the loader has no functions.
"""
self.api = api
self.functions = {}
def add_func(self, obj):
"""
Adds a CobblerFunction instance to the loader.
"""
self.functions[obj.command_name()] = obj
def run(self, args):
"""
Runs a command line sequence through the loader.
"""
args = self.old_school_remap(args)
# if no args given, show all loaded fns
if len(args) == 1:
return self.show_options()
called_name = args[1].lower()
# if -v or --version, make it work
if called_name in [ "--version", "-v" ]:
called_name = "version"
args = [ "/usr/bin/cobbler", "version" ]
# also show avail options if command name is bogus
if len(args) == 2 and not called_name in self.functions.keys():
if "--helpbash" in args:
return self.show_options_bashcompletion()
else:
return self.show_options()
try:
fn = self.functions[called_name]
except:
return self.show_options()
subs = fn.subcommands()
# three cases to show subcommands:
# (A): cobbler profile
# (B): cobbler profile --help
if len(subs) != 0:
problem = False
if (len(args) == 2):
problem = True
starter = args[-1]
if (("-h" in args or "--help" in args) and (len(args) == 3)):
problem = True
starter = args[-2]
elif len(args) >= 3:
ignore_it = False
for x in args[2:]:
if x.startswith("--"):
ignore_it = True
if not x.startswith("-") and x not in subs and not ignore_it:
problem = True
starter = args[1]
if problem:
print "usage:"
print "======"
for x in subs:
print "cobbler %s %s" % (starter, x)
sys.exit(1)
# some functions require args, if none given, show subcommands
#if len(args) == 2:
# no_args_rc = fn.no_args_handler()
# if no_args_rc:
# return True
# finally let the object parse its own args
loaded_ok = fn.parse_args(args)
if not loaded_ok:
raise CX(_("Invalid arguments"))
return fn.run()
def old_school_remap(self,args):
"""
Replaces commands with common synonyms that should also work
Also maps commands like:
# cobbler system report foo to cobbler report --name=foo
to:
# cobblerr system report --name=foo
for backwards compat and usability reasons
"""
# to do: handle synonyms
for ct in range(0,len(args)):
args[ct] = args[ct]
if args[ct].startswith("-"):
# stop synonym mapping after first option
break
# lowercase all args
args[ct] = args[ct].lower()
# delete means remove
# are there any other common synonyms?
if args[ct] == "delete":
args[ct] = "remove"
# special handling for reports follows:
if not "report" in args:
return args
ok = False
for x in ["distro","profile","system","repo","image"]:
if x in args:
ok = True
if not ok:
return args
idx = args.index("report")
if idx + 1 < len(args):
name = args[idx+1]
if name.find("--name") == -1:
args[idx+1] = "--name=%s" % name
return args
def show_options(self):
"""
Prints out all loaded functions.
"""
print "commands: (use --help on a subcommand for usage)"
print "========"
names = self.functions.keys()
names.sort()
for name in names:
help = self.functions[name].help_me()
if help != "":
print help
def show_options_bashcompletion(self):
"""
Prints out all loaded functions in an easily parseable form for
bash-completion
"""
names = self.functions.keys()
names.sort()
print ' '.join(names)
#=============================================================
class CobblerFunction:
    """
    Base class for all Cobbler CLI commands.

    Subclasses override command_name/subcommands/add_options/run; the
    object_manipulator_* helpers implement the shared
    add/edit/copy/rename/remove plumbing.
    """

    def __init__(self,api):
        """
        Constructor requires a Cobbler API handle.
        """
        self.api = api
        self.args = []

    def command_name(self):
        """
        The name of the command, as to be entered by users.
        """
        return "unspecified"

    def subcommands(self):
        """
        The names of any subcommands, such as "add", "edit", etc
        """
        return [ ]

    def run(self):
        """
        Called after arguments are parsed.  Return True for success.
        """
        return True

    def add_options(self, parser, args):
        """
        Used by subclasses to add options.  See subclasses for examples.
        """
        pass

    def helpbash(self, parser, args, print_options = True, print_subs = False):
        """
        Print out the arguments in an easily parseable format
        """
        # We only want to print either the subcommands available or the
        # options, but not both
        option_list = []
        if print_subs:
            for sub in self.subcommands():
                option_list.append(sub.__str__())
        elif print_options:
            for opt in parser.option_list:
                # optparse options render as "-x/--xlong"; split so each flag
                # is offered separately to bash completion
                option_list.extend(opt.__str__().split('/'))
        print ' '.join(option_list)

    def parse_args(self,args):
        """
        Processes arguments, called prior to run ... do not override.
        """
        # accumulate the leading non-option words for the usage string
        accum = ""
        for x in args[1:]:
            if not x.startswith("-"):
                accum = accum + "%s " % x
            else:
                break
        p = optparse.OptionParser(usage="cobbler %s [ARGS]" % accum)
        self.add_options(p, args)

        # if this command has subcommands, exactly one must be present;
        # otherwise show usage for each and bail out (still returns True)
        subs = self.subcommands()
        if len(subs) > 0:
            count = 0
            for x in subs:
                if x in args:
                    count = count + 1
            if count != 1:
                print "usage:"
                print "======"
                for x in subs:
                    print "cobbler %s %s [ARGS]" % (self.command_name(), x)
                return True
        (self.options, self.args) = p.parse_args(args)
        return True

    def object_manipulator_start(self,new_fn,collect_fn,subobject=False):
        """
        Boilerplate for objects that offer add/edit/delete/remove/copy functionality.

        Returns the object to operate on, or None when the subcommand has
        been fully handled here (power ops, remove, list, ...).
        """
        if "dumpvars" in self.args:
            if not self.options.name:
                raise CX(_("name is required"))
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            return obj

        if "poweron" in self.args:
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            self.api.power_on(obj,self.options.power_user,self.options.power_pass)
            return None

        if "poweroff" in self.args:
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            self.api.power_off(obj,self.options.power_user,self.options.power_pass)
            return None

        if "reboot" in self.args:
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            self.api.reboot(obj,self.options.power_user,self.options.power_pass)
            return None

        # NOTE(review): "deploy" is handled twice in this method.  This first
        # block performs the deploy but does NOT return, so control falls
        # through to the checks below and eventually the second "deploy"
        # block -- confirm whether the fall-through is intentional.
        if "deploy" in self.args:
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            if self.options.virt_host == '':
                virt_host = None
            else:
                virt_host = self.options.virt_host
            if self.options.virt_group == '':
                virt_group = None
            else:
                virt_group = self.options.virt_group
            self.api.deploy(obj,virt_host=virt_host,virt_group=virt_group)

        if "remove" in self.args:
            recursive = False
            # only applies to distros/profiles and is not supported elsewhere
            if hasattr(self.options, "recursive"):
                recursive = self.options.recursive
            if not self.options.name:
                raise CX(_("name is required"))
            if not recursive:
                collect_fn().remove(self.options.name,with_delete=True,recursive=False)
            else:
                collect_fn().remove(self.options.name,with_delete=True,recursive=True)
            return None # signal that we want no further processing on the object

        if "list" in self.args:
            self.list_list(collect_fn())
            return None

        if "report" in self.args:
            # args[1] is the object type (distro/profile/system/...)
            if self.options.name is None:
                return self.api.report(report_what = self.args[1], report_name = None, \
                    report_type = 'text', report_fields = 'all')
            else:
                return self.api.report(report_what = self.args[1], report_name = self.options.name, \
                    report_type = 'text', report_fields = 'all')

        if "getks" in self.args:
            if not self.options.name:
                raise CX(_("name is required"))
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            return obj

        # second "deploy" block -- see the review note above
        if "deploy" in self.args:
            if not self.options.name:
                raise CX(_("name is required"))
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object not found"))
            if obj.virt_host == '' or not self.options.virt_host or not self.options.virt_group:
                raise CX(_("No virtual host to deploy to"))
            return obj

        try:
            # catch some invalid executions of the CLI
            getattr(self, "options")
        except:
            sys.exit(1)

        if not self.options.name:
            raise CX(_("name is required"))

        if "add" in self.args:
            obj = new_fn(is_subobject=subobject)
        else:
            if "delete" in self.args:
                collect_fn().remove(self.options.name, with_delete=True)
                return None
            obj = collect_fn().find(self.options.name)
            if obj is None:
                raise CX(_("object named (%s) not found") % self.options.name)

        # copy/rename set the (new) name later; everything else sets it now
        if not "copy" in self.args and not "rename" in self.args and self.options.name:
            obj.set_name(self.options.name)

        return obj

    def object_manipulator_finish(self,obj,collect_fn, options):
        """
        Boilerplate for objects that offer add/edit/delete/remove/copy functionality.

        Persists the object prepared by object_manipulator_start, honoring
        copy/rename/add semantics and the nosync/notriggers options.
        """
        if "dumpvars" in self.args:
            print obj.dump_vars(True)
            return True

        if "getks" in self.args:
            ba=api.BootAPI()
            if "system" in self.args:
                rc = ba.generate_kickstart(None, self.options.name)
            if "profile" in self.args:
                rc = ba.generate_kickstart(self.options.name, None)
            if rc is None:
                print "kickstart is not template based"
            else:
                print rc
            return True

        clobber = False
        if "add" in self.args:
            clobber = options.clobber

        if "copy" in self.args:
            if self.options.newname:
                # FIXME: this should just use the copy function!
                if obj.COLLECTION_TYPE == "distro":
                    return self.api.copy_distro(obj, self.options.newname)
                if obj.COLLECTION_TYPE == "profile":
                    return self.api.copy_profile(obj, self.options.newname)
                if obj.COLLECTION_TYPE == "system":
                    return self.api.copy_system(obj, self.options.newname)
                if obj.COLLECTION_TYPE == "repo":
                    return self.api.copy_repo(obj, self.options.newname)
                if obj.COLLECTION_TYPE == "image":
                    return self.api.copy_image(obj, self.options.newname)
                raise CX(_("internal error, don't know how to copy"))
            else:
                raise CX(_("--newname is required"))

        opt_sync = not options.nosync
        opt_triggers = not options.notriggers

        # ** WARNING: COMPLICATED **
        # what operation we call depends on what type of object we are editing
        # and what the operation is.  The details behind this is that the
        # add operation has special semantics around adding objects that might
        # clobber other objects, and we don't want that to happen.  Edit
        # does not have to check for named clobbering but still needs
        # to check for IP/MAC clobbering in some scenarios (FIXME).
        # this is all enforced by collections.py though we need to make
        # the apppropriate call to add to invoke the safety code in the right
        # places -- and not in places where the safety code will generate
        # errors under legit circumstances.
        if not ("rename" in self.args):
            if "add" in self.args:
                if obj.COLLECTION_TYPE == "system":
                    # duplicate names and netinfo are both bad.
                    if not clobber:
                        rc = collect_fn().add(obj, save=True, with_sync=opt_sync, with_triggers=opt_triggers, check_for_duplicate_names=True, check_for_duplicate_netinfo=True)
                    else:
                        rc = collect_fn().add(obj, save=True, with_sync=opt_sync, with_triggers=opt_triggers, check_for_duplicate_names=False, check_for_duplicate_netinfo=True)
                else:
                    # duplicate names are bad
                    if not clobber:
                        rc = collect_fn().add(obj, save=True, with_sync=opt_sync, with_triggers=opt_triggers, check_for_duplicate_names=True, check_for_duplicate_netinfo=False)
                    else:
                        rc = collect_fn().add(obj, save=True, with_sync=opt_sync, with_triggers=opt_triggers, check_for_duplicate_names=False, check_for_duplicate_netinfo=False)
            else:
                check_dup = False
                if not "copy" in self.args:
                    check_dup = True
                rc = collect_fn().add(obj, save=True, with_sync=opt_sync, with_triggers=opt_triggers, check_for_duplicate_netinfo=check_dup)
        else:
            # we are renaming here, so duplicate netinfo checks also
            # need to be made.(FIXME)
            rc = collect_fn().rename(obj, self.options.newname, with_triggers=opt_triggers)

        return rc

    def list_tree(self,collection,level):
        """
        Print cobbler object tree as a, well, tree.
        """

        def sorter(a,b):
            # cmp-style comparator: order items alphabetically by name
            return cmp(a.name,b.name)

        collection2 = []
        for c in collection:
            collection2.append(c)
        collection2.sort(sorter)

        for item in collection2:
            print _("%(indent)s%(type)s %(name)s") % {
                "indent" : " " * level,
                "type" : item.TYPE_NAME,
                "name" : item.name
            }
            # recurse into child objects, indented one level deeper
            kids = item.get_children()
            if kids is not None and len(kids) > 0:
                self.list_tree(kids,level+1)

    def list_list(self, collection):
        """
        List all objects of a certain type.
        """
        names = [ x.name for x in collection]
        names.sort() # sorted() is 2.4 only
        for name in names:
            str = _("%(name)s") % { "name" : name }
            print str
        return True

    def matches_args(self, args, list_of):
        """
        Used to simplify some code around which arguments to add when.
        """
        for x in args:
            if x in list_of:
                return True
        return False
| gpl-2.0 |
praekelt/vumi-twitter | vxtwitter/tests/test_twitter.py | 1 | 23524 | from twisted.internet.defer import inlineCallbacks
from txtwitter.tests.fake_twitter import FakeTwitter, FakeImage, FakeMedia
from vumi.tests.utils import LogCatcher
from vumi.tests.helpers import VumiTestCase
from vumi.config import Config
from vumi.errors import ConfigError
from vumi.transports.tests.helpers import TransportHelper
from vxtwitter.twitter import (
ConfigTwitterEndpoints, TwitterTransport)
def open_fake_file(file_path, mode):
    """Stand-in for the transport's file opener used in these tests: always
    returns a FakeImage with canned contents.  The mode argument is accepted
    for signature compatibility but ignored."""
    return FakeImage(file_path, 'contents')
class TestTwitterEndpointsConfig(VumiTestCase):
    """Validation tests for the ConfigTwitterEndpoints config field."""

    def test_clean_no_endpoints(self):
        # An empty endpoint mapping must be rejected at config-clean time.
        class ToyConfig(Config):
            endpoints = ConfigTwitterEndpoints("test endpoints")

        self.assertRaises(ConfigError, ToyConfig, {'endpoints': {}})

    def test_clean_same_endpoints(self):
        # Mapping DMs and tweets onto the same endpoint must be rejected.
        class ToyConfig(Config):
            endpoints = ConfigTwitterEndpoints("test endpoints")

        duplicate_endpoints = {'endpoints': {
            'dms': 'default',
            'tweets': 'default'
        }}
        self.assertRaises(ConfigError, ToyConfig, duplicate_endpoints)
class TestTwitterTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.twitter = FakeTwitter()
self.user = self.twitter.new_user('me', 'me')
self.client = self.twitter.get_client(self.user.id_str)
self.patch(
TwitterTransport, 'get_client', lambda *a, **kw: self.client)
self.tx_helper = self.add_helper(TransportHelper(TwitterTransport))
self.config = {
'screen_name': 'me',
'consumer_key': 'consumer1',
'consumer_secret': 'consumersecret1',
'access_token': 'token1',
'access_token_secret': 'tokensecret1',
'terms': ['arnold', 'the', 'term'],
'endpoints': {
'tweets': 'tweet_endpoint',
'dms': 'dm_endpoint'
}
}
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport.open_file = open_fake_file
FakeImage.close = lambda _: None
def test_config_endpoints_default(self):
del self.config['endpoints']
self.config['transport_name'] = 'twitter'
config = TwitterTransport.CONFIG_CLASS(self.config)
self.assertEqual(config.endpoints, {'tweets': 'default'})
@inlineCallbacks
def test_config_no_tracking_stream(self):
self.config['terms'] = []
transport = yield self.tx_helper.get_transport(self.config)
self.assertEqual(transport.track_stream, None)
@inlineCallbacks
def test_tracking_tweets(self):
someone = self.twitter.new_user('someone', 'someone')
tweet = self.twitter.new_tweet('arnold', someone.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], 'NO_USER')
self.assertEqual(msg['content'], 'arnold')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': None,
'in_reply_to_screen_name': None,
'user_mentions': []
}
})
@inlineCallbacks
def test_tracking_reply_tweets(self):
someone = self.twitter.new_user('someone', 'someone')
someone_else = self.twitter.new_user('someone_else', 'someone_else')
tweet1 = self.twitter.new_tweet('@someone_else hello', someone.id_str)
tweet2 = self.twitter.new_tweet(
'@someone arnold', someone_else.id_str, reply_to=tweet1.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone_else')
self.assertEqual(msg['to_addr'], '@someone')
self.assertEqual(msg['content'], 'arnold')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet2.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': tweet1.id_str,
'in_reply_to_screen_name': 'someone',
'user_mentions': [{
'id_str': someone.id_str,
'id': int(someone.id_str),
'indices': [0, 8],
'screen_name': someone.screen_name,
'name': someone.name,
}]
}
})
def test_tracking_own_messages(self):
with LogCatcher() as lc:
tweet = self.twitter.new_tweet('arnold', self.user.id_str)
tweet = tweet.to_dict(self.twitter)
self.assertTrue(any(
"Tracked own tweet:" in msg for msg in lc.messages()))
@inlineCallbacks
def test_inbound_tweet(self):
someone = self.twitter.new_user('someone', 'someone')
tweet = self.twitter.new_tweet('@me hello', someone.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], 'hello')
self.assertEqual(msg.get_routing_endpoint(), 'tweet_endpoint')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': None,
'in_reply_to_screen_name': 'me',
'user_mentions': [{
'id_str': self.user.id_str,
'id': int(self.user.id_str),
'indices': [0, 3],
'screen_name': self.user.screen_name,
'name': self.user.name,
}]
}
})
@inlineCallbacks
def test_inbound_tweet_reply(self):
someone = self.twitter.new_user('someone', 'someone')
tweet1 = self.twitter.new_tweet('@someone hello', self.user.id_str)
tweet2 = self.twitter.new_tweet(
'@me goodbye', someone.id_str, reply_to=tweet1.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], 'goodbye')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet2.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': tweet1.id_str,
'in_reply_to_screen_name': 'me',
'user_mentions': [{
'id_str': self.user.id_str,
'id': int(self.user.id_str),
'indices': [0, 3],
'screen_name': self.user.screen_name,
'name': self.user.name,
}]
}
})
def test_inbound_own_tweet(self):
with LogCatcher() as lc:
self.twitter.new_tweet('hello', self.user.id_str)
self.assertTrue(any(
"Received own tweet on user stream" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_inbound_tweet_no_endpoint(self):
self.config['endpoints'] = {'dms': 'default'}
yield self.tx_helper.get_transport(self.config)
someone = self.twitter.new_user('someone', 'someone')
with LogCatcher() as lc:
self.twitter.new_tweet('@me hello', someone.id_str)
self.assertTrue(any(
"Discarding tweet received on user stream, no endpoint "
"configured for tweets" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_inbound_dm(self):
someone = self.twitter.new_user('someone', 'someone')
dm = self.twitter.new_dm('hello @me', someone.id_str, self.user.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], 'hello @me')
self.assertEqual(msg.get_routing_endpoint(), 'dm_endpoint')
self.assertEqual(msg['helper_metadata'], {
'dm_twitter': {
'id': dm.id_str,
'user_mentions': [{
'id_str': self.user.id_str,
'id': int(self.user.id_str),
'indices': [6, 9],
'screen_name': self.user.screen_name,
'name': self.user.name,
}]
}
})
def test_inbound_own_dm(self):
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.new_dm('hello', self.user.id_str, someone.id_str)
self.assertTrue(any(
"Received own DM on user stream" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_inbound_dm_no_endpoint(self):
self.config['endpoints'] = {'tweets': 'default'}
yield self.tx_helper.get_transport(self.config)
someone = self.twitter.new_user('someone', 'someone')
with LogCatcher() as lc:
self.twitter.new_dm('hello @me', someone.id_str, self.user.id_str)
self.assertTrue(any(
"Discarding DM received on user stream, no endpoint "
"configured for DMs" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_auto_following(self):
self.config['autofollow'] = True
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
self.assertTrue(any(
"Received follow on user stream" in msg
for msg in lc.messages()))
self.assertTrue(any(
"Auto-following '@someone'" in msg
for msg in lc.messages()))
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertEqual(follow.source_id, self.user.id_str)
self.assertEqual(follow.target_id, someone.id_str)
@inlineCallbacks
def test_auto_following_disabled(self):
self.config['autofollow'] = False
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
self.assertTrue(any(
"Received follow on user stream" in msg
for msg in lc.messages()))
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertTrue(follow is None)
@inlineCallbacks
def test_auto_response_tweet(self):
self.config['autoresponse'] = True
self.config['autoresponse_type'] = 'tweets'
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
# Assert that message has been published
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], None)
self.assertEqual(msg.get_routing_endpoint(), 'tweet_endpoint')
self.assertEqual(msg['in_reply_to'], None)
self.assertTrue(any(
"Publish null message to vumi" in msg
for msg in lc.messages()))
self.assertTrue(any(
"Send null message to vumi for auto-follow '@someone'" in msg
for msg in lc.messages()))
# Assert that following is not happening
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertTrue(follow is None)
@inlineCallbacks
def test_auto_response_dm(self):
self.config['autoresponse'] = True
self.config['autoresponse_type'] = 'dms'
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
# Assert that message has been published
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], None)
self.assertEqual(msg.get_routing_endpoint(), 'dm_endpoint')
self.assertEqual(msg['in_reply_to'], None)
self.assertTrue(any(
"Publish null message to vumi" in msg
for msg in lc.messages()))
self.assertTrue(any(
"Send null message to vumi for auto-follow '@someone'" in msg
for msg in lc.messages()))
# Assert that following is not happening
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertTrue(follow is None)
@inlineCallbacks
def test_auto_response_auto_follow_enabled(self):
self.config['autoresponse'] = True
self.config['autofollow'] = True
self.config['autoresponse_type'] = 'tweets'
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
# Assert that message has been published
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], None)
self.assertEqual(msg.get_routing_endpoint(), 'tweet_endpoint')
self.assertEqual(msg['in_reply_to'], None)
# Check log messages
self.assertTrue(any(
"Received follow on user stream" in msg
for msg in lc.messages()))
self.assertTrue(any(
"Publish null message to vumi" in msg
for msg in lc.messages()))
self.assertTrue(any(
"Send null message to vumi for auto-follow '@someone'" in msg
for msg in lc.messages()))
# Assert that following is happening
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertTrue(follow is not None)
    def test_inbound_own_follow(self):
        """A follow performed by our own user is only logged, not published."""
        with LogCatcher() as lc:
            someone = self.twitter.new_user('someone', 'someone')
            self.twitter.add_follow(self.user.id_str, someone.id_str)
        self.assertTrue(any(
            "Received own follow on user stream" in msg
            for msg in lc.messages()))
    @inlineCallbacks
    def test_tweet_sending(self):
        """An outbound message on the tweet endpoint becomes a tweet that
        mentions the recipient, and an ack is published for it."""
        self.twitter.new_user('someone', 'someone')
        msg = yield self.tx_helper.make_dispatch_outbound(
            'hello', to_addr='@someone', endpoint='tweet_endpoint')
        [ack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(ack['user_message_id'], msg['message_id'])
        tweet = self.twitter.get_tweet(ack['sent_message_id'])
        self.assertEqual(tweet.text, '@someone hello')
        self.assertEqual(tweet.reply_to, None)
    @inlineCallbacks
    def test_upload_media(self):
        """upload_media_and_get_id() stores the file with the fake Twitter
        API and returns the resulting media id."""
        media_id = yield self.transport.upload_media_and_get_id(
            {'file_path': 'image'})
        media = self.twitter.get_media(media_id)
        expected_media = FakeMedia(media_id, FakeImage('image', 'contents'),
                                   additional_owners={})
        self.assertEqual(
            media.to_dict(self.twitter), expected_media.to_dict(self.twitter))
    @inlineCallbacks
    def test_tweet_with_embedded_media(self):
        """Media referenced in helper_metadata is uploaded and attached to
        the outgoing tweet via its media_ids."""
        expected_id = str(self.twitter._next_media_id)
        self.twitter.new_user('someone', 'someone')
        msg = yield self.tx_helper.make_dispatch_outbound(
            'hello', to_addr='@someone', endpoint='tweet_endpoint',
            helper_metadata={'twitter': {
                'media': [{'file_path': 'image'}],
            }})
        [ack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(ack['user_message_id'], msg['message_id'])
        tweet = self.twitter.get_tweet(ack['sent_message_id'])
        tweet_dict = tweet.to_dict(self.twitter)
        media_id = tweet_dict.get('media_ids').split(',')[0]
        media = self.twitter.get_media(media_id)
        expected_media = FakeMedia(expected_id, FakeImage('image', 'contents'),
                                   additional_owners={})
        self.assertEqual(
            media.to_dict(self.twitter), expected_media.to_dict(self.twitter))
        self.assertEqual(tweet_dict['text'], '@someone hello')
        self.assertEqual(tweet_dict['media_ids'].rstrip(','), expected_id)
    @inlineCallbacks
    def test_tweet_with_multiple_images(self):
        """Every media entry in helper_metadata ends up uploaded and
        referenced in the tweet's media_ids."""
        self.twitter.new_user('someone', 'someone')
        msg = yield self.tx_helper.make_dispatch_outbound(
            'hello', to_addr='@someone', endpoint='tweet_endpoint',
            helper_metadata={'twitter': {
                'media': [{'file_path': 'img1'}, {'file_path': 'img2'}],
            }})
        [ack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(ack['user_message_id'], msg['message_id'])
        tweet = self.twitter.get_tweet(ack['sent_message_id'])
        tweet_dict = tweet.to_dict(self.twitter)
        media_ids = tweet_dict['media_ids'].rstrip(',')
        self.assertEqual(len(media_ids.split(',')), 2)
        for expected_id in self.twitter.media.keys():
            self.assertIn(expected_id, media_ids)
    @inlineCallbacks
    def test_tweet_reply_sending(self):
        """Replying to an inbound tweet sets the outgoing tweet's
        in-reply-to id from the inbound transport metadata."""
        tweet1 = self.twitter.new_tweet(
            'hello', self.user.id_str, endpoint='tweet_endpoint')
        inbound_msg = self.tx_helper.make_inbound(
            'hello',
            from_addr='@someone',
            endpoint='tweet_endpoint',
            transport_metadata={
                'twitter': {'status_id': tweet1.id_str}
            })
        msg = yield self.tx_helper.make_dispatch_reply(inbound_msg, "goodbye")
        [ack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(ack['user_message_id'], msg['message_id'])
        tweet2 = self.twitter.get_tweet(ack['sent_message_id'])
        self.assertEqual(tweet2.text, '@someone goodbye')
        self.assertEqual(tweet2.reply_to, tweet1.id_str)
    @inlineCallbacks
    def test_tweet_sending_failure(self):
        """A failing statuses_update call is logged and the message nacked."""
        def fail(*a, **kw):
            # Simulate the twitter client blowing up on send.
            raise Exception(':(')
        self.patch(self.client, 'statuses_update', fail)
        with LogCatcher() as lc:
            msg = yield self.tx_helper.make_dispatch_outbound(
                'hello', endpoint='tweet_endpoint')
            self.assertEqual(
                [e['message'][0] for e in lc.errors],
                ["'Outbound twitter message failed: :('"])
        [nack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(nack['user_message_id'], msg['message_id'])
        self.assertEqual(nack['sent_message_id'], msg['message_id'])
        self.assertEqual(nack['nack_reason'], ':(')
    @inlineCallbacks
    def test_dm_sending(self):
        """An outbound message on the DM endpoint is sent as a direct
        message from our user to the addressee."""
        self.twitter.new_user('someone', 'someone')
        msg = yield self.tx_helper.make_dispatch_outbound(
            'hello', to_addr='@someone', endpoint='dm_endpoint')
        [ack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(ack['user_message_id'], msg['message_id'])
        dm = self.twitter.get_dm(ack['sent_message_id'])
        sender = self.twitter.get_user(dm.sender_id_str)
        recipient = self.twitter.get_user(dm.recipient_id_str)
        self.assertEqual(dm.text, 'hello')
        self.assertEqual(sender.screen_name, 'me')
        self.assertEqual(recipient.screen_name, 'someone')
    @inlineCallbacks
    def test_dm_sending_failure(self):
        """A failing direct_messages_new call is logged and the message
        nacked."""
        def fail(*a, **kw):
            # Simulate the twitter client blowing up on send.
            raise Exception(':(')
        self.patch(self.client, 'direct_messages_new', fail)
        with LogCatcher() as lc:
            msg = yield self.tx_helper.make_dispatch_outbound(
                'hello', endpoint='dm_endpoint')
            self.assertEqual(
                [e['message'][0] for e in lc.errors],
                ["'Outbound twitter message failed: :('"])
        [nack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(nack['user_message_id'], msg['message_id'])
        self.assertEqual(nack['sent_message_id'], msg['message_id'])
        self.assertEqual(nack['nack_reason'], ':(')
    def test_track_stream_for_non_tweet(self):
        """Non-tweet payloads on the tracking stream are only logged."""
        with LogCatcher() as lc:
            self.transport.handle_track_stream({'foo': 'bar'})
            self.assertEqual(
                lc.messages(),
                ["Received non-tweet from tracking stream: {'foo': 'bar'}"])
    def test_user_stream_for_unsupported_message(self):
        """Unrecognized user-stream payloads are only logged."""
        with LogCatcher() as lc:
            self.transport.handle_user_stream({'foo': 'bar'})
            self.assertEqual(
                lc.messages(),
                ["Received a user stream message that we do not handle: "
                 "{'foo': 'bar'}"])
    def test_tweet_content_with_mention_at_start(self):
        """A leading mention of our user is stripped from the tweet text."""
        self.assertEqual('hello', self.transport.tweet_content({
            'id_str': '12345',
            'text': '@fakeuser hello',
            'user': {},
            'entities': {
                'user_mentions': [{
                    'id_str': '123',
                    'screen_name': 'fakeuser',
                    'name': 'Fake User',
                    'indices': [0, 8]
                }]
            },
        }))
    def test_tweet_content_with_mention_not_at_start(self):
        """A mention elsewhere in the text is left untouched."""
        self.assertEqual('hello @fakeuser!', self.transport.tweet_content({
            'id_str': '12345',
            'text': 'hello @fakeuser!',
            'user': {},
            'entities': {
                'user_mentions': [{
                    'id_str': '123',
                    'screen_name': 'fakeuser',
                    'name': 'Fake User',
                    'indices': [6, 14]
                }]
            },
        }))
    def test_tweet_content_with_no_mention(self):
        """Text without any mentions passes through unchanged."""
        self.assertEqual('hello', self.transport.tweet_content({
            'id_str': '12345',
            'text': 'hello',
            'user': {},
            'entities': {
                'user_mentions': []
            },
        }))
    def test_tweet_content_with_no_user_in_text(self):
        """A leading token that is not a mention entity is kept in the text."""
        self.assertEqual('NO_USER hello', self.transport.tweet_content({
            'id_str': '12345',
            'text': 'NO_USER hello',
            'user': {},
            'entities': {
                'user_mentions': []
            },
        }))
| bsd-3-clause |
a10networks/a10-neutron-lbaas | a10_neutron_lbaas/tests/unit/v2/test_handler_member.py | 1 | 8946 | # Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
import a10_neutron_lbaas.a10_exceptions as a10_ex
from a10_neutron_lbaas.tests.unit.v2 import fake_objs
from a10_neutron_lbaas.tests.unit.v2 import test_base
LOG = logging.getLogger(__name__)
def return_one(*args):
    """Stub member-count: report exactly one member, whatever the args."""
    _ = args  # accepted only to match the patched callable's signature
    return 1
def return_two(*args):
    """Stub member-count: report exactly two members, whatever the args."""
    _ = args  # accepted only to match the patched callable's signature
    return 2
class TestMembers(test_base.HandlerTestBase):
    """Unit tests for the v2 member handler: verifies the slb.server and
    slb.service_group.member calls issued against the mocked A10 client."""
    def set_count_1(self):
        # Pretend exactly one member exists for the member's IP.
        self.a.member.neutron.member_count = return_one
    def set_count_2(self):
        # Pretend two members share the member's IP.
        self.a.member.neutron.member_count = return_two
    def test_get_ip(self):
        m = fake_objs.FakeMember(pool=mock.MagicMock())
        self.a.member.neutron.member_get_ip(None, m, False)
        self.print_mocks()
        self.a.neutron.member_get_ip.assert_called_with(
            None, m, False)
    def test_get_name(self):
        m = fake_objs.FakeMember(pool=mock.MagicMock())
        z = self.a.member._get_name(m, '1.1.1.1')
        self.assertEqual(z, '_get-o_1_1_1_1_neutron')
    def test_count(self):
        self.a.member.neutron.member_count(
            None, fake_objs.FakeMember(pool=mock.MagicMock()))
    def _test_create(self, admin_state_up=True, uuid_name=False, conn_limit=None, conn_resume=None):
        """Drive member create and assert the server + service-group member
        calls, optionally checking conn-limit/conn-resume axapi args."""
        if uuid_name:
            old = self.a.config.get('member_name_use_uuid')
            self.a.config._config.member_name_use_uuid = True
        m = fake_objs.FakeMember(admin_state_up=admin_state_up,
                                 pool=mock.MagicMock())
        ip = self.a.member.neutron.member_get_ip(None, m, True)
        if uuid_name:
            name = m.id
        else:
            name = self.a.member._get_name(m, ip)
        self.a.member.create(None, m)
        if admin_state_up:
            status = self.a.last_client.slb.UP
        else:
            status = self.a.last_client.slb.DOWN
        server_args = {}
        if conn_limit is not None:
            server_args['conn-limit'] = conn_limit
        if conn_resume is not None:
            server_args['conn-resume'] = conn_resume
        self.a.last_client.slb.server.create.assert_called_with(
            name, ip,
            status=status,
            server_templates=None,
            config_defaults=mock.ANY,
            axapi_args={'server': server_args})
        self.a.last_client.slb.service_group.member.create.assert_called_with(
            m.pool.id, name, m.protocol_port, status=status,
            axapi_args={'member': {}})
        if uuid_name:
            self.a.config._config.member_name_use_uuid = old
    def test_create_connlimit(self):
        for k, v in self.a.config.get_devices().items():
            v['conn-limit'] = 1337
        self._test_create(conn_limit=1337)
    def test_create_connlimit_oob(self):
        # Over the maximum: the handler is expected to reject the value.
        for k, v in self.a.config.get_devices().items():
            v['conn-limit'] = 8000001
        try:
            self._test_create(conn_limit=8000001)
        except a10_ex.ConnLimitOutOfBounds:
            pass
    def test_create_connlimit_uob(self):
        # Under the minimum: the handler is expected to reject the value.
        for k, v in self.a.config.get_devices().items():
            v['conn-limit'] = 0
        try:
            self._test_create(conn_limit=0)
        except a10_ex.ConnLimitOutOfBounds:
            pass
    def test_create_connresume(self):
        for k, v in self.a.config.get_devices().items():
            v['conn-resume'] = 1337
        self._test_create(conn_resume=1337)
    def test_create_connresume_oob(self):
        for k, v in self.a.config.get_devices().items():
            v['conn-resume'] = 1000001
        try:
            self._test_create(conn_resume=1000001)
        except a10_ex.ConnLimitOutOfBounds:
            pass
    def test_create_connresume_uob(self):
        for k, v in self.a.config.get_devices().items():
            v['conn-resume'] = 0
        try:
            self._test_create(conn_resume=0)
        except a10_ex.ConnLimitOutOfBounds:
            pass
    def test_update_down(self):
        m = fake_objs.FakeMember(False, pool=mock.MagicMock())
        ip = self.a.member.neutron.member_get_ip(None, m, True)
        name = self.a.member._get_name(m, ip)
        self.a.member.update(None, m, m)
        self.a.last_client.slb.service_group.member.update.assert_called_with(
            m.pool.id, name, m.protocol_port, self.a.last_client.slb.DOWN,
            axapi_args={'member': {}})
    def test_delete(self):
        m = fake_objs.FakeMember(False, pool=mock.MagicMock())
        ip = self.a.member.neutron.member_get_ip(None, m, True)
        self.set_count_1()
        self.a.member.delete(None, m)
        # NOTE(review): this line *invokes* the mock rather than asserting on
        # it (likely meant `...server.delete.assert_called_with(...)`), so
        # server deletion is not actually verified -- confirm intended
        # argument (ip vs name) before fixing.
        self.a.last_client.slb.server.delete(ip)
    def test_delete_count_gt_one(self):
        # With another member on the same IP, only the service-group member
        # is removed (the server is shared).
        m = fake_objs.FakeMember(False, pool=mock.MagicMock())
        ip = self.a.member.neutron.member_get_ip(None, m, True)
        name = self.a.member._get_name(m, ip)
        self.set_count_2()
        self.a.member.delete(None, m)
        self.a.last_client.slb.service_group.member.delete.assert_called_with(
            m.pool_id, name, m.protocol_port)
    def _test_create_expressions(self, os_name, pattern, expressions=None):
        """Create a member named *os_name* and assert the config defaults
        selected for the matching name-expression *pattern*."""
        self.a.config.get_member_expressions = self._get_expressions_mock
        expressions = expressions or self.a.config.get_member_expressions()
        expected = expressions.get(pattern, {}).get("json", {})
        admin_state = self.a.last_client.slb.UP
        m = fake_objs.FakeMember(admin_state_up=admin_state,
                                 pool=mock.MagicMock())
        m.name = os_name
        handler = self.a.member
        handler.create(None, m)
        # s = str(self.a.last_client.mock_calls)
        self.a.last_client.slb.server.create.assert_called_with(
            mock.ANY,
            mock.ANY,
            status=mock.ANY,
            server_templates=None,
            config_defaults=expected,
            axapi_args={'server': {}})
        # self.assertIn("member.create", s)
        # self.assertIn(str(expected), s)
    def test_create_expressions_none(self):
        self._test_create_expressions("server", None, {})
    def test_create_expressions_match_beginning(self):
        self._test_create_expressions("secureserver", self.EXPR_BEGIN)
    def test_create_expressions_match_end(self):
        self._test_create_expressions("serverweb", self.EXPR_END)
    def test_create_expressions_match_charclass(self):
        self._test_create_expressions("serverwwserver", self.EXPR_CLASS)
    def test_create_expressions_nomatch(self):
        # A name that matches no expression gets empty config defaults.
        self.a.config.get_member_expressions = self._get_expressions_mock
        admin_state = self.a.last_client.slb.UP
        m = fake_objs.FakeMember(admin_state_up=admin_state,
                                 pool=mock.MagicMock())
        m.name = "myserver"
        handler = self.a.member
        handler.create(None, m)
        self.a.last_client.slb.server.create.assert_called_with(
            mock.ANY, mock.ANY,
            status=mock.ANY,
            server_templates=None,
            config_defaults={},
            axapi_args={'server': {}})
    def test_create_empty_name_noexception(self):
        # A None name must not crash expression matching.
        self.a.config.get_member_expressions = self._get_expressions_mock
        admin_state = self.a.last_client.slb.UP
        m = fake_objs.FakeMember(admin_state_up=admin_state,
                                 pool=mock.MagicMock())
        m.name = None
        handler = self.a.member
        handler.create(None, m)
        self.a.last_client.slb.server.create.assert_called_with(
            mock.ANY, mock.ANY,
            status=mock.ANY,
            server_templates=None,
            config_defaults={},
            axapi_args={'server': {}})
    def test_create_with_template(self,):
        # Device-level templates are forwarded as server_templates.
        template = {
            "server": {
                "template-server": "sg1"
            }
        }
        expect = {'template-server': 'sg1'}
        for k, v in self.a.config.get_devices().items():
            v['templates'] = template
        m = fake_objs.FakeMember(admin_state_up=True,
                                 pool=mock.MagicMock())
        self.a.member.create(None, m)
        self.a.last_client.slb.server.create.assert_called_with(
            mock.ANY, mock.ANY,
            status=mock.ANY,
            config_defaults=mock.ANY,
            server_templates=expect,
            axapi_args={'server': {}})
| apache-2.0 |
wunderlins/learning | python/django/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/operations.py | 64 | 10630 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.base import Database
from django.db.backends.oracle.operations import DatabaseOperations
from django.utils import six
DEFAULT_TOLERANCE = '0.05'
class SDOOperator(SpatialOperator):
    # Generic Oracle spatial operator of the form SDO_xxx(lhs, rhs) = 'TRUE'.
    sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODistance(SpatialOperator):
    # Distance comparison via SDO_GEOM.SDO_DISTANCE using the default
    # tolerance; %(op)s is the comparison operator (<, <=, >, >=).
    sql_template = "SDO_GEOM.SDO_DISTANCE(%%(lhs)s, %%(rhs)s, %s) %%(op)s %%%%s" % DEFAULT_TOLERANCE
class SDODWithin(SpatialOperator):
    # dwithin lookup; the distance travels as a 'distance=...' SQL parameter.
    sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
    # Disjoint test expressed through SDO_GEOM.RELATE with a DISJOINT mask.
    sql_template = "SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'" % DEFAULT_TOLERANCE
class SDORelate(SpatialOperator):
    # SDO_RELATE embeds its relationship mask inside a string argument.
    sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"
    def check_relate_argument(self, arg):
        """Validate that *arg* is a legal SDO_RELATE mask: one or more of the
        recognized keywords, joined by '+'.  Raise ValueError otherwise."""
        masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
        mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
        if not isinstance(arg, six.string_types) or not mask_regex.match(arg):
            raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)
    def as_sql(self, connection, lookup, template_params, sql_params):
        # The mask arrives as the last SQL parameter; move it into the
        # template params since it is interpolated into the SQL string.
        template_params['mask'] = sql_params.pop()
        return super(SDORelate, self).as_sql(connection, lookup, template_params, sql_params)
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
    """Spatial operations for the Oracle Spatial backend: maps Django's
    geographic lookups, functions, and aggregates onto Oracle SDO_* SQL."""
    name = 'oracle'
    oracle = True
    disallowed_aggregates = (aggregates.Collect, aggregates.Extent3D, aggregates.MakeLine)
    Adapter = OracleSpatialAdapter
    Adaptor = Adapter # Backwards-compatibility alias.
    # Oracle SQL routine names for the supported spatial operations.
    area = 'SDO_GEOM.SDO_AREA'
    gml = 'SDO_UTIL.TO_GMLGEOMETRY'
    centroid = 'SDO_GEOM.SDO_CENTROID'
    difference = 'SDO_GEOM.SDO_DIFFERENCE'
    distance = 'SDO_GEOM.SDO_DISTANCE'
    extent = 'SDO_AGGR_MBR'
    intersection = 'SDO_GEOM.SDO_INTERSECTION'
    length = 'SDO_GEOM.SDO_LENGTH'
    num_points = 'SDO_UTIL.GETNUMVERTICES'
    perimeter = length
    point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
    reverse = 'SDO_UTIL.REVERSE_LINESTRING'
    sym_difference = 'SDO_GEOM.SDO_XOR'
    transform = 'SDO_CS.TRANSFORM'
    union = 'SDO_GEOM.SDO_UNION'
    unionagg = 'SDO_AGGR_UNION'
    # Django GIS database function name -> Oracle SQL routine.
    function_names = {
        'Area': 'SDO_GEOM.SDO_AREA',
        'Centroid': 'SDO_GEOM.SDO_CENTROID',
        'Difference': 'SDO_GEOM.SDO_DIFFERENCE',
        'Distance': 'SDO_GEOM.SDO_DISTANCE',
        'Intersection': 'SDO_GEOM.SDO_INTERSECTION',
        'Length': 'SDO_GEOM.SDO_LENGTH',
        'NumGeometries': 'SDO_UTIL.GETNUMELEM',
        'NumPoints': 'SDO_UTIL.GETNUMVERTICES',
        'Perimeter': 'SDO_GEOM.SDO_LENGTH',
        'PointOnSurface': 'SDO_GEOM.SDO_POINTONSURFACE',
        'Reverse': 'SDO_UTIL.REVERSE_LINESTRING',
        'SymDifference': 'SDO_GEOM.SDO_XOR',
        'Transform': 'SDO_CS.TRANSFORM',
        'Union': 'SDO_GEOM.SDO_UNION',
    }
    # We want to get SDO Geometries as WKT because it is much easier to
    # instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
    # However, this adversely affects performance (i.e., Java is called
    # to convert to WKT on every query). If someone wishes to write a
    # SDO_GEOMETRY(...) parser in Python, let me know =)
    select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
    # Lookup name -> SpatialOperator used to compile it to SQL.
    gis_operators = {
        'contains': SDOOperator(func='SDO_CONTAINS'),
        'coveredby': SDOOperator(func='SDO_COVEREDBY'),
        'covers': SDOOperator(func='SDO_COVERS'),
        'disjoint': SDODisjoint(),
        'intersects': SDOOperator(func='SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
        'equals': SDOOperator(func='SDO_EQUAL'),
        'exact': SDOOperator(func='SDO_EQUAL'),
        'overlaps': SDOOperator(func='SDO_OVERLAPS'),
        'same_as': SDOOperator(func='SDO_EQUAL'),
        'relate': SDORelate(), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
        'touches': SDOOperator(func='SDO_TOUCH'),
        'within': SDOOperator(func='SDO_INSIDE'),
        'distance_gt': SDODistance(op='>'),
        'distance_gte': SDODistance(op='>='),
        'distance_lt': SDODistance(op='<'),
        'distance_lte': SDODistance(op='<='),
        'dwithin': SDODWithin(),
    }
    truncate_params = {'relate': None}
    # GIS database functions with no Oracle implementation in this backend.
    unsupported_functions = {
        'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG',
        'BoundingCircle', 'Envelope',
        'ForceRHR', 'GeoHash', 'MemSize', 'Scale',
        'SnapToGrid', 'Translate',
    }
    def geo_quote_name(self, name):
        """Quote the name as usual, then uppercase it."""
        return super(OracleOperations, self).geo_quote_name(name).upper()
    def get_db_converters(self, expression):
        """Extend the default converters for spatial columns: text conversion
        for geometry-backed field types, plus Geometry construction when the
        output field declares a geom_type."""
        converters = super(OracleOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        geometry_fields = (
            'PointField', 'GeometryField', 'LineStringField',
            'PolygonField', 'MultiPointField', 'MultiLineStringField',
            'MultiPolygonField', 'GeometryCollectionField', 'GeomField',
            'GMLField',
        )
        if internal_type in geometry_fields:
            converters.append(self.convert_textfield_value)
        if hasattr(expression.output_field, 'geom_type'):
            converters.append(self.convert_geometry)
        return converters
    def convert_geometry(self, value, expression, connection, context):
        """Build a Geometry from a non-empty database value, applying any
        'transformed_srid' recorded in *context*."""
        if value:
            value = Geometry(value)
            if 'transformed_srid' in context:
                value.srid = context['transformed_srid']
        return value
    def convert_extent(self, clob, srid):
        """Return the (xmin, ymin, xmax, ymax) 4-tuple from the CLOB produced
        by the extent aggregate, or None for an empty result."""
        if clob:
            # Generally, Oracle returns a polygon for the extent -- however,
            # it can return a single point if there's only one Point in the
            # table.
            ext_geom = Geometry(clob.read(), srid)
            gtype = str(ext_geom.geom_type)
            if gtype == 'Polygon':
                # Construct the 4-tuple from the coordinates in the polygon.
                shell = ext_geom.shell
                ll, ur = shell[0][:2], shell[2][:2]
            elif gtype == 'Point':
                ll = ext_geom.coords[:2]
                ur = ll
            else:
                raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
            xmin, ymin = ll
            xmax, ymax = ur
            return (xmin, ymin, xmax, ymax)
        else:
            return None
    def convert_geom(self, value, geo_field):
        """Build a Geometry in *geo_field*'s SRID from a raw value or an
        Oracle LOB; return None for empty values."""
        if value:
            if isinstance(value, Database.LOB):
                value = value.read()
            return Geometry(value, geo_field.srid)
        else:
            return None
    def geo_db_type(self, f):
        """
        Returns the geometry database type for Oracle. Unlike other spatial
        backends, no stored procedure is necessary and it's the same for all
        geometry types.
        """
        return 'MDSYS.SDO_GEOMETRY'
    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters given the value and the lookup type.
        On Oracle, geometry columns with a geodetic coordinate system behave
        implicitly like a geography column, and thus meters will be used as
        the distance parameter on them.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                dist_param = value.m
            else:
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value
        # dwithin lookups on Oracle require a special string parameter
        # that starts with "distance=".
        if lookup_type == 'dwithin':
            dist_param = 'distance=%s' % dist_param
        return [dist_param]
    def get_geom_placeholder(self, f, value, compiler):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        SDO_CS.TRANSFORM() function call.
        """
        if value is None:
            return 'NULL'
        def transform_value(val, srid):
            # True when the value's SRID differs from the field's, i.e. a
            # coordinate transform is required.
            return val.srid != srid
        if hasattr(value, 'as_sql'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            sql, _ = compiler.compile(value)
            return placeholder % sql
        else:
            if transform_value(value, f.srid):
                return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
            else:
                return 'SDO_GEOMETRY(%%s, %s)' % f.srid
    def spatial_aggregate_name(self, agg_name):
        """
        Returns the spatial aggregate SQL name.
        """
        agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
        return getattr(self, agg_name)
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        """Return the model mapping Oracle's geometry-columns metadata."""
        from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
        return OracleGeometryColumns
    def spatial_ref_sys(self):
        """Return the model mapping Oracle's spatial reference system table."""
        from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
        return OracleSpatialRefSys
    def modify_insert_params(self, placeholder, params):
        """Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
        backend due to #10888.
        """
        if placeholder == 'NULL':
            return []
        return super(OracleOperations, self).modify_insert_params(placeholder, params)
| gpl-2.0 |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/heapq.py | 208 | 17997 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
    """Push item onto heap, maintaining the heap invariant."""
    # Append at the end (the only free leaf slot), then restore the
    # invariant by bubbling the new entry up toward the root.
    heap.append(item)
    last = len(heap) - 1
    _siftdown(heap, 0, last)
def heappop(heap):
    """Pop the smallest item off the heap, maintaining the heap invariant."""
    # list.pop() raises the appropriate IndexError when the heap is empty.
    tail = heap.pop()
    if not heap:
        return tail
    smallest = heap[0]
    # Move the detached tail element to the root, then sink it back down.
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.

    This is more efficient than heappop() followed by heappush(), and can be
    more appropriate when using a fixed-size heap.  Note that the value
    returned may be larger than item!  That constrains reasonable uses of
    this routine unless written as part of a conditional replacement:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    smallest = heap[0]  # raises appropriate IndexError if heap is empty
    # Replace the root and sink the new item to its proper position.
    heap[0] = item
    _siftup(heap, 0)
    return smallest
def heappushpop(heap, item):
    """Fast version of a heappush followed by a heappop."""
    # If the heap is empty, or item is no larger than the root, the push
    # would be immediately undone by the pop -- return item untouched.
    if not heap:
        return item
    smallest = heap[0]
    if smallest < item:
        heap[0] = item
        _siftup(heap, 0)
        return smallest
    return item
def heapify(x):
    """Transform list into a heap, in-place, in O(len(x)) time."""
    # Leaves are one-element heaps already.  Sift down every internal
    # node, starting at the last parent (index n//2 - 1) and walking
    # back to the root; this bottom-up order keeps total work linear.
    n = len(x)
    for i in range(n // 2 - 1, -1, -1):
        _siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
    """Find the n largest elements in a dataset.

    Equivalent to: sorted(iterable, reverse=True)[:n]
    """
    if n < 0:
        return []
    it = iter(iterable)
    # Seed a min-heap with the first n items; its root is the smallest
    # current candidate and gets evicted whenever a larger item arrives.
    candidates = list(islice(it, n))
    if not candidates:
        return candidates
    heapify(candidates)
    push_pop = heappushpop
    for elem in it:
        push_pop(candidates, elem)
    candidates.sort(reverse=True)
    return candidates
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation.  The wildcard import replaces the
# pure-Python definitions above with the accelerated versions from the
# _heapq extension module, keeping the same public names.
try:
    from _heapq import *
except ImportError:
    # No C extension in this build; keep the Python implementations.
    pass
def merge(*iterables):
    '''Merge multiple sorted inputs into a single sorted output.
    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).
    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
    '''
    # Bind frequently-used globals/builtins to locals for speed in the
    # loops below.
    _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
    _len = len
    # Heap entries are mutable lists [value, iterator-index, next-method];
    # the index breaks value ties so the bound methods never get compared.
    h = []
    h_append = h.append
    for itnum, it in enumerate(map(iter, iterables)):
        try:
            next = it.__next__
            h_append([next(), itnum, next])
        except _StopIteration:
            # Empty input stream; it contributes nothing to the merge.
            pass
    heapify(h)
    # Main loop: repeatedly yield the smallest head value and advance
    # that entry's iterator in place, re-heapifying via heapreplace.
    while _len(h) > 1:
        try:
            while True:
                v, itnum, next = s = h[0]
                yield v
                s[0] = next()          # raises StopIteration when exhausted
                _heapreplace(h, s)     # restore heap condition
        except _StopIteration:
            _heappop(h)                # remove empty iterator
    if h:
        # fast case when only a single iterator remains: drain it
        # directly instead of going through the heap machinery.
        v, itnum, next = h[0]
        yield v
        yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.
    Equivalent to: sorted(iterable, key=key)[:n]
    """
    # Short-cut: asking for a single element is just min(), provided
    # the input is not empty.
    if n == 1:
        stream = iter(iterable)
        first = list(islice(stream, 1))
        if not first:
            return []
        rest = chain(first, stream)
        return [min(rest)] if key is None else [min(rest, key=key)]
    # When the input length is known and n covers it, sorted() wins.
    try:
        total = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= total:
            return sorted(iterable, key=key)[:n]
    if key is None:
        # Decorate with a serial number only, to break ties stably.
        decorated = zip(iterable, count())
        return [value for value, _ in _nsmallest(n, decorated)]
    # General case, slowest method: (key, serial, value) triples.
    keyed, plain = tee(iterable)
    decorated = zip(map(key, keyed), count(), plain)
    return [value for _, _, value in _nsmallest(n, decorated)]
_nlargest = nlargest
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.
    Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
    """
    # Short-cut: asking for a single element is just max(), provided
    # the input is not empty.
    if n == 1:
        stream = iter(iterable)
        first = list(islice(stream, 1))
        if not first:
            return []
        rest = chain(first, stream)
        return [max(rest)] if key is None else [max(rest, key=key)]
    # When the input length is known and n covers it, sorted() wins.
    try:
        total = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= total:
            return sorted(iterable, key=key, reverse=True)[:n]
    if key is None:
        # Decorate with a *descending* serial number so earlier items
        # win ties, matching sorted(..., reverse=True) stability.
        decorated = zip(iterable, count(0, -1))
        return [value for value, _ in _nlargest(n, decorated)]
    # General case, slowest method: (key, serial, value) triples.
    keyed, plain = tee(iterable)
    decorated = zip(map(key, keyed), count(0, -1), plain)
    return [value for _, _, value in _nlargest(n, decorated)]
if __name__ == "__main__":
    # Simple sanity test: pushing every item and then popping until
    # the heap is empty must yield the data in ascending order.
    heap = []
    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
    for item in data:
        heappush(heap, item)
    sort = []
    while heap:
        sort.append(heappop(heap))
    print(sort)
    # Also run the doctests embedded in this module (e.g. merge()).
    import doctest
    doctest.testmod()
| lgpl-3.0 |
staticfloat/pybladeRF | bladeRF/__init__.py | 2 | 4512 | from ._cffi import ffi, ptop
has_numpy = True
try:
import numpy as np
except ImportError:
has_numpy = False
from .init import (
open,
close,
open_device,
get_device_list,
free_device_list,
)
from .ctrl import (
enable_module,
set_sample_rate,
get_sampling,
set_sampling,
get_sample_rate,
get_rational_sample_rate,
set_txvga2,
get_txvga2,
set_txvga1,
get_txvga1,
set_lna_gain,
get_lna_gain,
set_rxvga1,
get_rxvga1,
set_rxvga2,
get_rxvga2,
set_bandwidth,
get_bandwidth,
set_lpf_mode,
get_lpf_mode,
select_band,
set_frequency,
get_frequency,
)
from .data import (
raw_callback,
init_stream,
stream,
deinit_stream,
tx,
rx,
sync_config,
)
from .misc import (
log_set_verbosity,
)
from .info import (
get_fpga_size,
)
import bladeRF._cffi
# compilation happens here in verify(),
# all C code must be defined at this point
# and only after here may the library functions
# and constants be accessed. hence the many import
# name tricks used through the implementation
# http://explosm.net/comics/420/
ffi.cdef("""
float* samples_to_floats(void*, int);
void free(void *ptr);
""")
bladeRF._cffi.lib = lib = ffi.verify("""
#include <libbladeRF.h>
#include <stdlib.h>
/* this helper function is to turn the two 16 bit ints per sample into
   two normalized floats, so that it can be passed directly to
   numpy.frombuffer which can only take two 32-bit floats and turn them
   into a complex64 */
float* samples_to_floats(void *samples, int num_samples) {
    int i;
    int16_t* data = (int16_t*)samples;
    float* buffer = (float*)malloc(2 * num_samples * sizeof(float));
    /* BUGFIX: each complex sample contributes TWO int16 scalars (I and
       Q), so 2 * num_samples floats must be written to fill the buffer
       allocated above; the previous bound (i < num_samples) left the
       second half of the buffer uninitialized, which to_float_buffer /
       samples_to_narray then exposed as garbage I/Q data. */
    for (i = 0; i < 2 * num_samples; i++) {
        buffer[i] = (float)data[i] * (1.0f/2048.0f);
    }
    return buffer;
}
""", libraries=['bladeRF'])
def samples_to_floats(samples, num_samples):
    """Call optimized C function to allocate and return pointer to
    buffer full of normalized I/Q floats.

    The returned pointer is wrapped with ffi.gc() so the malloc'd C
    buffer is released via free() when the Python handle is collected.
    """
    return ffi.gc(lib.samples_to_floats(samples, num_samples), lib.free)
def to_float_buffer(raw_samples, num_samples):
    """Return an FFI buffer of I/Q floats."""
    # Two float32 values (I and Q) per complex sample, hence the
    # 2 * num_samples * sizeof(float) buffer length.
    return bladeRF.ffi.buffer(samples_to_floats(raw_samples, num_samples), 2*num_samples*bladeRF.ffi.sizeof('float'))
if has_numpy:
    # Only defined when numpy imported successfully at module load.
    def samples_to_narray(samples, num_samples):
        """Return a numpy array of type complex64 from the samples."""
        # complex64 pairs consecutive float32 values as (real, imag),
        # matching the interleaved I/Q layout of the float buffer.
        return np.frombuffer(to_float_buffer(samples, num_samples), np.complex64)
MODULE_TX = lib.BLADERF_MODULE_TX
MODULE_RX = lib.BLADERF_MODULE_RX
SAMPLING_UNKNOWN = lib.BLADERF_SAMPLING_UNKNOWN
SAMPLING_INTERNAL = lib.BLADERF_SAMPLING_INTERNAL
SAMPLING_EXTERNAL = lib.BLADERF_SAMPLING_EXTERNAL
LNA_GAIN_UNKNOWN = lib.BLADERF_LNA_GAIN_UNKNOWN
LNA_GAIN_BYPASS = lib.BLADERF_LNA_GAIN_BYPASS
LNA_GAIN_MID = lib.BLADERF_LNA_GAIN_MID
LNA_GAIN_MAX = lib.BLADERF_LNA_GAIN_MAX
RXVGA1_GAIN_MIN = lib.BLADERF_RXVGA1_GAIN_MIN
RXVGA1_GAIN_MAX = lib.BLADERF_RXVGA1_GAIN_MAX
RXVGA2_GAIN_MIN = lib.BLADERF_RXVGA2_GAIN_MIN
RXVGA2_GAIN_MAX = lib.BLADERF_RXVGA2_GAIN_MAX
TXVGA1_GAIN_MIN = lib.BLADERF_TXVGA1_GAIN_MIN
TXVGA1_GAIN_MAX = lib.BLADERF_TXVGA1_GAIN_MAX
TXVGA2_GAIN_MIN = lib.BLADERF_TXVGA2_GAIN_MIN
TXVGA2_GAIN_MAX = lib.BLADERF_TXVGA2_GAIN_MAX
SAMPLERATE_MIN = lib.BLADERF_SAMPLERATE_MIN
SAMPLERATE_REC_MAX = lib.BLADERF_SAMPLERATE_REC_MAX
BANDWIDTH_MIN = lib.BLADERF_BANDWIDTH_MIN
BANDWIDTH_MAX = lib.BLADERF_BANDWIDTH_MAX
FREQUENCY_MIN = lib.BLADERF_FREQUENCY_MIN
FREQUENCY_MAX = lib.BLADERF_FREQUENCY_MAX
LPF_NORMAL = lib.BLADERF_LPF_NORMAL
LPF_BYPASSED = lib.BLADERF_LPF_BYPASSED
LPF_DISABLED = lib.BLADERF_LPF_DISABLED
FORMAT_SC16_Q11 = lib.BLADERF_FORMAT_SC16_Q11
LOG_LEVEL_VERBOSE = lib.BLADERF_LOG_LEVEL_VERBOSE
LOG_LEVEL_DEBUG = lib.BLADERF_LOG_LEVEL_DEBUG
LOG_LEVEL_INFO = lib.BLADERF_LOG_LEVEL_INFO
LOG_LEVEL_WARNING = lib.BLADERF_LOG_LEVEL_WARNING
LOG_LEVEL_ERROR = lib.BLADERF_LOG_LEVEL_ERROR
LOG_LEVEL_CRITICAL = lib.BLADERF_LOG_LEVEL_CRITICAL
LOG_LEVEL_SILENT = lib.BLADERF_LOG_LEVEL_SILENT
from .errors import (
BladeRFException,
UnexpectedError,
RangeError,
InvalError,
MemError,
BladeIOError,
TimeoutError,
NodevError,
UnsupportedError,
MisalignedError,
ChecksumError,
)
from device import (
Device,
)
def power(samples):
    """Return the power of *samples* in dB: 10 * log10(|<s, s>|)."""
    # vdot conjugates its first argument, so this is the total energy
    # of the sample vector; abs() guards against a negative-zero/complex
    # residue before taking the log.
    energy = np.abs(np.vdot(samples, samples))
    return 10 * np.log10(energy)
def squelched(samples, level):
    """Return True when the power of *samples* falls below *level* dB."""
    measured = power(samples)
    return measured < level
| gpl-3.0 |
sallum/driver-telematics | RegressionDriver.py | 1 | 2127 | import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from random import sample
class RegressionDriver(object):
    """Class for Regression-based analysis of Driver traces.

    Trains a GradientBoostingRegressor to separate one driver's traces
    (positive examples) from traces of other drivers (negatives), then
    scores the driver's own traces with the fitted model.

    NOTE(review): this module relies on Python 2 behaviour (``xrange``
    and a list-like ``dict.keys()``) and will not run unmodified on
    Python 3 — confirm the target interpreter before porting.
    """
    def __init__(self, driver, datadict):
        """Initialize by providing a (positive) driver example and a dictionary of (negative) driver references."""
        self.driver = driver
        self.numfeatures = self.driver.num_features
        featurelist = []
        self.__indexlist = []
        # Collect one feature vector (and its identifier) per trace.
        for trace in self.driver.traces:
            self.__indexlist.append(trace.identifier)
            featurelist.append(trace.features)
        # Initialize train and test np arrays: the driver's own traces
        # serve both as positive training data and as the test set.
        self.__traindata = np.asarray(featurelist)
        self.__testdata = np.asarray(featurelist)
        self.__trainlabels = np.ones((self.__traindata.shape[0],))
        data = np.empty((0, driver.num_features), float)
        setkeys = datadict.keys()
        # Exclude the driver itself from the negative references; when it
        # is absent, drop one random key so the negative set has the same
        # size either way.  NOTE(review): .remove() on keys() and
        # sample() over it are Python 2 semantics — verify when porting.
        if driver.identifier in setkeys:
            setkeys.remove(driver.identifier)
        else:
            setkeys = sample(setkeys, len(setkeys) - 1)
        for key in setkeys:
            if key != driver.identifier:
                data = np.append(data, np.asarray(datadict[key]), axis=0)
        self.__traindata = np.append(self.__traindata, data, axis=0)
        self.__trainlabels = np.append(self.__trainlabels, np.zeros((data.shape[0],)), axis=0)
        # Predictions default to 1.0 until classify() is run.
        self.__y = np.ones((self.__testdata.shape[0],))
    def classify(self):
        """Perform classification.

        Fits the regressor on positives (label 1) vs. negatives (label 0)
        and stores the predicted scores for the driver's own traces.
        """
        clf = GradientBoostingRegressor(n_estimators=100, max_depth=4)
        clf.fit(self.__traindata, self.__trainlabels)
        self.__y = clf.predict(self.__testdata)
    def toKaggle(self):
        """Return string in Kaggle submission format.

        One "driverid_traceid,score" line per trace; the final line is
        emitted without a trailing newline.
        """
        returnstring = ""
        for i in xrange(len(self.__indexlist) - 1):
            returnstring += "%d_%d,%.3f\n" % (self.driver.identifier, self.__indexlist[i], self.__y[i])
        returnstring += "%d_%d,%.3f" % (self.driver.identifier, self.__indexlist[len(self.__indexlist)-1], self.__y[len(self.__indexlist)-1])
        return returnstring
| bsd-2-clause |
happyleavesaoc/home-assistant | homeassistant/components/weather/yweather.py | 4 | 5381 | """
Support for the Yahoo! Weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/weather.yweather/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.weather import (
WeatherEntity, PLATFORM_SCHEMA, ATTR_FORECAST_TEMP)
from homeassistant.const import (TEMP_CELSIUS, CONF_NAME, STATE_UNKNOWN)
REQUIREMENTS = ["yahooweather==0.8"]
_LOGGER = logging.getLogger(__name__)
ATTR_FORECAST_CONDITION = 'condition'
ATTRIBUTION = "Weather details provided by Yahoo! Inc."
CONF_FORECAST = 'forecast'
CONF_WOEID = 'woeid'
DEFAULT_NAME = 'Yweather'
SCAN_INTERVAL = timedelta(minutes=10)
CONDITION_CLASSES = {
'cloudy': [26, 27, 28, 29, 30],
'fog': [19, 20, 21, 22, 23],
'hail': [17, 18, 35],
'lightning': [37],
'lightning-rainy': [38, 39],
'partlycloudy': [44],
'pouring': [40, 45],
'rainy': [9, 11, 12],
'snowy': [8, 13, 14, 15, 16, 41, 42, 43],
'snowy-rainy': [5, 6, 7, 10, 46, 47],
'sunny': [32],
'windy': [24],
'windy-variant': [],
'exceptional': [0, 1, 2, 3, 4, 25, 36],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_WOEID, default=None): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_FORECAST, default=0):
vol.All(vol.Coerce(int), vol.Range(min=0, max=5)),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Yahoo! weather platform.

    Resolves the WOEID (from the config entry, or derived from the Home
    Assistant latitude/longitude), validates the configured forecast
    day index, and registers a single YahooWeatherWeather entity.
    Returns False on any setup failure.
    """
    from yahooweather import get_woeid, UNIT_C, UNIT_F
    unit = hass.config.units.temperature_unit
    woeid = config.get(CONF_WOEID)
    forecast = config.get(CONF_FORECAST)
    name = config.get(CONF_NAME)
    # Ask the API for units matching Home Assistant's configuration.
    yunit = UNIT_C if unit == TEMP_CELSIUS else UNIT_F
    # No custom WOEID configured: derive one from the instance location.
    if woeid is None:
        woeid = get_woeid(hass.config.latitude, hass.config.longitude)
        if woeid is None:
            _LOGGER.warning("Can't retrieve WOEID from Yahoo!")
            return False
    yahoo_api = YahooWeatherData(woeid, yunit)
    # Abort setup when the very first data fetch already fails.
    if not yahoo_api.update():
        _LOGGER.critical("Can't retrieve weather data from Yahoo!")
        return False
    # The configured forecast index must exist in the API's response.
    if forecast >= len(yahoo_api.yahoo.Forecast):
        _LOGGER.error("Yahoo! only support %d days forecast",
                      len(yahoo_api.yahoo.Forecast))
        return False
    add_devices([YahooWeatherWeather(yahoo_api, name, forecast)], True)
class YahooWeatherWeather(WeatherEntity):
    """Representation of Yahoo! weather data."""
    def __init__(self, weather_data, name, forecast):
        """Initialize the sensor.

        weather_data: YahooWeatherData wrapper around the API object.
        name: display name for the entity.
        forecast: index into the API's forecast list (0 = today).
        """
        self._name = name
        self._data = weather_data
        self._forecast = forecast
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def condition(self):
        """Return the current condition."""
        # Map Yahoo's numeric condition code onto a Home Assistant
        # condition key via the CONDITION_CLASSES table.
        try:
            return [k for k, v in CONDITION_CLASSES.items() if
                    int(self._data.yahoo.Now['code']) in v][0]
        except IndexError:
            # Code not present in any class list.
            return STATE_UNKNOWN
    @property
    def temperature(self):
        """Return the temperature."""
        return self._data.yahoo.Now['temp']
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # NOTE(review): always reports Celsius, although setup_platform
        # may configure the API with UNIT_F — confirm this matches the
        # unit of the data actually returned.
        return TEMP_CELSIUS
    @property
    def pressure(self):
        """Return the pressure."""
        return self._data.yahoo.Atmosphere['pressure']
    @property
    def humidity(self):
        """Return the humidity."""
        return self._data.yahoo.Atmosphere['humidity']
    @property
    def visibility(self):
        """Return the visibility."""
        return self._data.yahoo.Atmosphere['visibility']
    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self._data.yahoo.Wind['speed']
    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION
    @property
    def forecast(self):
        """Return the forecast array."""
        # Translate the configured forecast day's condition code the
        # same way the `condition` property does for current weather.
        try:
            forecast_condition = \
                [k for k, v in CONDITION_CLASSES.items() if
                 int(self._data.yahoo.Forecast[self._forecast]['code'])
                 in v][0]
        except IndexError:
            return STATE_UNKNOWN
        return [{
            ATTR_FORECAST_CONDITION: forecast_condition,
            ATTR_FORECAST_TEMP:
                self._data.yahoo.Forecast[self._forecast]['high'],
        }]
    def update(self):
        """Get the latest data from Yahoo! and updates the states."""
        self._data.update()
        if not self._data.yahoo.RawData:
            # Keep the previous state when the API returned nothing.
            _LOGGER.info("Don't receive weather data from Yahoo!")
            return
class YahooWeatherData(object):
    """Handle the Yahoo! API object and limit updates."""
    def __init__(self, woeid, temp_unit):
        """Initialize the data object.

        woeid: Yahoo "Where On Earth" location identifier.
        temp_unit: yahooweather UNIT_C or UNIT_F constant.
        """
        from yahooweather import YahooWeather
        self._yahoo = YahooWeather(woeid, temp_unit)
    @property
    def yahoo(self):
        """Return Yahoo! API object."""
        return self._yahoo
    def update(self):
        """Get the latest data from Yahoo!.

        Returns the result of updateWeather(); setup_platform treats a
        falsy value as a fetch failure.
        """
        return self._yahoo.updateWeather()
| apache-2.0 |
apechimp/servo | tests/wpt/web-platform-tests/conformance-checkers/tools/picture.py | 238 | 29212 | # -*- coding: utf-8 -*-
import os
ccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
template = """<!DOCTYPE html>
<meta charset=utf-8>
"""
errors = {
# missing src on img
"img-no-src": "<img alt>",
"img-no-src-with-srcset": "<img srcset=x alt>",
"img-no-src-with-picture": "<picture><img alt></picture>",
"img-no-src-with-srcset-and-picture": "<picture><img srcset=x alt></picture>",
"img-no-src-with-source": "<picture><source srcset=x><img alt></picture>",
# junk content in picture
"junk-text-before-img": "<picture>x<img src=x alt></picture>",
"junk-text-after-img": "<picture><img src=x alt>x</picture>",
"junk-text-before-source": "<picture>x<source srcset=x><img src=x alt></picture>",
"junk-text-after-source": "<picture><source srcset=x>x<img src=x alt></picture>",
"junk-br-before-img": "<picture><br><img src=x alt></picture>",
"junk-br-after-img": "<picture><img src=x alt><br></picture>",
"junk-br-before-source": "<picture><br><source srcset=x><img src=x alt></picture>",
"junk-br-after-source": "<picture><source srcset=x><br><img src=x alt></picture>",
"junk-video-before": "<picture><video></video><source srcset=x><img src=x alt></picture>",
"junk-video-no-img": "<picture><video></video></picture>",
"junk-p-before": "<picture><p></p><source srcset=x><img src=x alt></picture>",
"junk-p-after": "<picture><source srcset=x><img src=x alt><p></p></picture>",
"junk-p-wrapping": "<picture><p><source srcset=x><img src=x alt></p></picture>",
"junk-span-before": "<picture><span></span><source srcset=x><img src=x alt></picture>",
"junk-span-after": "<picture><source srcset=x><img src=x alt><span></span></picture>",
"junk-span-wrapping": "<picture><span><source srcset=x><img src=x alt></span></picture>",
"junk-picture-before": "<picture><picture><img src=x alt></picture><img src=x alt></picture>",
"junk-picture-wrapping": "<picture><picture><img src=x alt></picture></picture>",
"junk-figure-wrapping": "<picture><figure><img src=x alt></figure></picture>",
"junk-input-type-hidden": "<picture><input type=hidden name=x value=x><img src=x alt></picture>",
"junk-style-scroped": "<picture><style scroped></style><img src=x alt></picture>",
"junk-noscript": "<picture><img src=x alt><noscript></noscript></picture>",
"junk-noscript-after-source-no-img": "<picture><source srcset=x><noscript><img src=x alt></noscript></picture>",
"junk-svg": "<picture><img src=x alt><svg></svg></picture>",
"junk-svg-no-img": "<picture><svg></svg></picture>",
"junk-math-nog-img": "<picture><math></math></picture>",
# parents
"parent-ul": "<ul><picture><img src=x alt></picture></ul>",
"parent-dl": "<dl><picture><img src=x alt></picture></dl>",
"parent-hgroup": "<hgroup><h1>x</h1><picture><img src=x alt></picture></hgroup>",
"parent-noscript-in-head": "<noscript><picture><img src=x alt></picture></noscript>",
# invalid html syntax
"html-syntax-source-end-tag": "<picture><source srcset=x></source><img src=x alt></picture>",
"html-syntax-img-end-tag": "<picture><img src=x alt></img></picture>",
"html-syntax-picture-no-end-tag": "<picture><img src=x alt>",
"html-syntax-picture-slash": "<picture/><img src=x alt></picture>",
"html-syntax-picture-slash-no-end-tag": "<picture/><img src=x alt>",
# missing img in picture
"missing-img-empty-picture": "<picture></picture>",
"missing-img-only-source": "<picture><source srcset=x></picture>",
"missing-img-only-script": "<picture><script></script></picture>",
"missing-img-script-and-source": "<picture><script></script><source srcset=x></picture>",
"missing-img-source-and-script": "<picture><source srcset=x><script></script></picture>",
# multiple img in picture
"multiple-img": "<picture><img src=x alt><img src=x alt></picture>",
"multiple-img-with-script": "<picture><img src=x alt><script></script><img src=x alt></picture>",
"multiple-img-with-source": "<picture><source srcset=x><img src=x alt><img src=x alt></picture>",
"multiple-img-with-source-and-script": "<picture><source srcset=x><img src=x alt><script></script><img src=x alt></picture>",
# source after img
"source-after-img": "<picture><img src=x alt><source srcset=x></picture>",
"source-before-and-after-img": "<picture><source srcset=x><img src=x alt><source srcset=x></picture>",
# source with following sibling source element or img element with a srcset attribute
"always-matching-source-with-following-img-srcset": "<picture><source srcset=x><img src=x srcset=x alt></picture>",
"always-matching-source-with-following-source-srcset": "<picture><source srcset=x><source srcset=x><img src=x alt></picture>",
"always-matching-source-with-following-source-media": "<picture><source srcset=x><source srcset=x media=screen><img src=x alt></picture>",
"always-matching-source-with-following-source-type": "<picture><source srcset=x><source srcset=x type=image/gif><img src=x alt></picture>",
"always-matching-source-media-empty-with-following-source-srcset": "<picture><source srcset=x media><source srcset=x><img src=x alt></picture>",
"always-matching-source-media-spaces-with-following-source-srcset": "<picture><source srcset=x media=' \n\t'><source srcset=x><img src=x alt></picture>",
"always-matching-source-media-all-with-following-source-srcset": "<picture><source srcset=x media=all><source srcset=x><img src=x alt></picture>",
"always-matching-source-media-uppercase-with-following-source-srcset": "<picture><source srcset=x media=ALL><source srcset=x><img src=x alt></picture>",
"always-matching-source-media-all-spaces-with-following-source-srcset": "<picture><source srcset=x media=' all '><source srcset=x><img src=x alt></picture>",
"always-matching-source-sizes-with-following-source-srcset": "<picture><source srcset='x 100w' sizes=50vw><source srcset=x><img src=x alt></picture>",
# sizes present
"img-srcset-no-descriptor-with-sizes": "<img src=x srcset='x' sizes=50vw alt>",
"img-srcset-w-and-x-width-sizes": "<img src=x srcset='x 100w, y 2x' sizes=50vw alt>",
"source-srcset-x-with-sizes": "<picture><source srcset='x 1x, y 2x' sizes=50vw><img src=x alt></picture>",
"source-srcset-h-with-sizes": "<picture><source srcset='x 100h, y 200h' sizes=50vw><img src=x alt></picture>",
"source-srcset-w-and-x-with-sizes": "<picture><source srcset='x 100w, y 2x' sizes=50vw><img src=x alt></picture>",
"img-with-sizes-no-srcset": "<img sizes=50vw src=foo alt>",
# width descriptor without sizes
"img-srcset-w-no-sizes": "<img srcset='x 100w, y 200w' src=x alt>",
"source-srcset-w-no-sizes": "<picture><source srcset='x 100w, y 200w'><img src=x alt></picture>",
"source-type-srcset-w": "<picture><source srcset='x 100w, y 200w' type=image/gif><img src=x alt></picture>",
# invalid attributes on source
"source-src": "<picture><source src=x><img src=x alt></picture>",
"source-src-srcset": "<picture><source src=x srcset=x><img src=x alt></picture>",
"source-alt": "<picture><source srcset=x alt><img src=x alt></picture>",
"source-width": "<picture><source srcset=x width=100><img src=x alt></picture>",
"source-height": "<picture><source srcset=x height=100><img src=x alt></picture>",
"source-usemap": "<picture><source srcset=x usemap><img src=x alt></picture>",
"source-ismap": "<picture><source srcset=x ismap><img src=x alt></picture>",
"source-crossorigin": "<picture><source srcset=x crossorigin><img src=x alt></picture>",
"source-name": "<picture><source srcset=x crossorigin><img src=x alt></picture>",
"source-align": "<picture><source srcset=x align=left><img src=x alt></picture>",
"source-hspace": "<picture><source srcset=x hspace=1><img src=x alt></picture>",
"source-vspace": "<picture><source srcset=x vspace=1><img src=x alt></picture>",
"source-longdesc": "<picture><source srcset=x longdesc=x><img src=x alt></picture>",
"source-border": "<picture><source srcset=x border=1><img src=x alt></picture>",
# missing srcset on source
"source-no-srcset": "<picture><source><img src=x alt></picture>",
"source-no-srcset-with-sizes": "<picture><source sizes=50vw><img src=x alt></picture>",
"source-no-srcset-with-media": "<picture><source media=screen><img src=x alt></picture>",
"source-no-srcset-with-type": "<picture><source type='image/webp'><img src=x alt></picture>",
# invalid attributes on picture
"picture-src": "<picture src=x><img src=x alt></picture>",
"picture-srcset": "<picture srcset=x><img src=x alt></picture>",
"picture-media": "<picture media=screen><img src=x alt></picture>",
"picture-sizes": "<picture sizes=50vw><img src=x alt></picture>",
"picture-alt": "<picture alt><img src=x alt></picture>",
"picture-width": "<picture width=100><img src=x alt></picture>",
"picture-height": "<picture height=100><img src=x alt></picture>",
"picture-usemap": "<picture usemap><img src=x alt></picture>",
"picture-ismap": "<picture ismap><img src=x alt></picture>",
"picture-crossorigin": "<picture crossorigin><img src=x alt></picture>",
"picture-name": "<picture name=x><img src=x alt></picture>",
"picture-lowsrc": "<picture lowsrc=x><img src=x alt></picture>",
"picture-align": "<picture align=left><img src=x alt></picture>",
"picture-hspace": "<picture hspace=1><img src=x alt></picture>",
"picture-vspace": "<picture vspace=1><img src=x alt></picture>",
"picture-longdesc": "<picture longdesc=x><img src=x alt></picture>",
"picture-border": "<picture border=1><img src=x alt></picture>",
# invalid attributes on source in video
"video-source-srcset": "<video><source srcset=x></video>",
"video-source-srcset-src": "<video><source srcset=x src=x></video>",
"video-source-sizes-srcset": "<video><source sizes=50vw srcset='x 100w'></video>",
"video-source-media-src": "<video><source media=screen src=x></video>",
# srcset on other elements
"link-rel-icon-srcset": "<link rel=icon srcset=x href=x>",
"input-type-image-srcset": "<input type=image src=x srcset=x alt=x>",
"object-srcset": "<object data=x srcset=x></object>",
"video-srcset": "<video src=x srcset=x></video>",
"audio-srcset": "<audio src=x srcset=x></audio>",
"track-srcset": "<video src=x><track src=x srcset=x></video>",
"svg-image-srcset": "<svg><image xlink:href=x srcset=x width=1 height=1 /></svg>",
# invalid attributes on img
"img-type": "<img src=x type=image/gif alt>",
"img-type-with-picture": "<picture><img src=x type=image/gif alt></picture>",
# sizes microsyntax
"sizes-microsyntax-media-all": "<img sizes='all 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-media-all-and-min-width": "<img sizes='all and (min-width:500px) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-media-min-width-no-parenthesis": "<img sizes='min-width:500px 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-media-general-enclosed-junk": "<img sizes='(123) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-media-bad-junk": "<img sizes='(}) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-two-defaults": "<img sizes='500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-default-first": "<img sizes='100vw, (min-width:500px) 500px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-trailing-comma": "<img sizes='(min-width:500px) 500px, 100vw,' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-trailing-junk": "<img sizes='(min-width:500px) 500px, 100vw, foo bar' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-junk-in-default": "<img sizes='(min-width:500px) 500px, 100vw foo bar' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-junk-in-source-size": "<img sizes='(min-width:500px) 500px foo bar, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-percent-in-source-size-value": "<img sizes='(min-width:500px) 50%, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-no-unit-in-source-size-value": "<img sizes='(min-width:500px) 50, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-deg-source-size-value": "<img sizes='1deg' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-grad-source-size-value": "<img sizes='1grad' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-rad-source-size-value": "<img sizes='1rad' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-turn-source-size-value": "<img sizes='1turn' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-s-source-size-value": "<img sizes='1s' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-ms-source-size-value": "<img sizes='1ms' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-hz-source-size-value": "<img sizes='1Hz' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-khz-source-size-value": "<img sizes='1kHz' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-dpi-source-size-value": "<img sizes='1dpi' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-dpcm-source-size-value": "<img sizes='1dpcm' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-dppx-source-size-value": "<img sizes='1dppx' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-auto-source-size-value": "<img sizes='auto' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-inherit-source-size-value": "<img sizes='inherit' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-initial-source-size-value": "<img sizes='initial' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-default-source-size-value": "<img sizes='default' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-foo-bar-source-size-value": "<img sizes='foo-bar' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-negative-source-size-value": "<img sizes='-1px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-empty": "<img sizes='' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-comma": "<img sizes=',' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-css-comment-after-plus": "<img sizes='+/**/50vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-css-comment-before-unit": "<img sizes='50/**/vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientific-notation-negative": "<img sizes='-1e+0px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientific-notation-non-integer-in-exponent": "<img sizes='1e+1.5px' srcset='x 100w, y 200w' src=x alt>",
# srcset microsyntax
"srcset-microsyntax-leading-comma": "<img srcset=',x' src=x alt>",
"srcset-microsyntax-leading-comma-multiple": "<img srcset=',,,x' src=x alt>",
"srcset-microsyntax-trailing-comma": "<img srcset='x,' src=x alt>",
"srcset-microsyntax-trailing-comma-multiple": "<img srcset='x,,,' src=x alt>",
"srcset-microsyntax-broken-url": "<img srcset='http: 1x' src=x alt>",
"srcset-microsyntax-non-integer-w": "<img srcset='x 1.5w' sizes=100vw src=x alt>",
"srcset-microsyntax-uppercase-w": "<img srcset='x 1W' sizes=100vw src=x alt>",
"srcset-microsyntax-plus-w": "<img srcset='x +1w' sizes=100vw src=x alt>",
"srcset-microsyntax-scientific-notation-w": "<img srcset='x 1e0w' sizes=100vw src=x alt>",
"srcset-microsyntax-zero-w": "<img srcset='x 0w' sizes=100vw src=x alt>",
"srcset-microsyntax-negative-zero-w": "<img srcset='x -0w' sizes=100vw src=x alt>",
"srcset-microsyntax-negative-w": "<img srcset='x -1w' sizes=100vw src=x alt>",
"srcset-microsyntax-plus-x": "<img srcset='x +1x' src=x alt>",
"srcset-microsyntax-negative-x": "<img srcset='x -1x' src=x alt>",
"srcset-microsyntax-zero-x": "<img srcset='x 0x' src=x alt>",
"srcset-microsyntax-negative-zero-x": "<img srcset='x -0x' src=x alt>",
"srcset-microsyntax-leading-dot-x": "<img srcset='x .5x' src=x alt>",
"srcset-microsyntax-nan-x": "<img srcset='x NaNx' src=x alt>",
"srcset-microsyntax-infinity-x": "<img srcset='x Infinityx' src=x alt>",
"srcset-microsyntax-x-and-w": "<img srcset='x 1x 1w' sizes=100vw src=x alt>",
"srcset-microsyntax-x-and-h": "<img srcset='x 1x 1h' sizes=100vw src=x alt>",
"srcset-microsyntax-w-and-h": "<img srcset='x 1w 1h' sizes=100vw src=x alt>",
"srcset-microsyntax-h": "<img srcset='x 1h' sizes=100vw src=x alt>",
"srcset-microsyntax-function": "<img srcset='x foobar(baz quux, lol), y 1x' src=x alt>",
"srcset-microsyntax-parenthesis-junk": "<img srcset='x ><(((((o)>, y 1x' src=x alt>",
"srcset-microsyntax-square-bracket-junk": "<img srcset='x [, y 1x' src=x alt>",
"srcset-microsyntax-curly-bracket-junk": "<img srcset='x {, y 1x' src=x alt>",
"srcset-microsyntax-pipe-junk": "<img srcset='x ||, y 1x' src=x alt>",
"srcset-microsyntax-w-and-no-descriptor": "<img srcset='x 1w, y' sizes=100vw src=x alt>",
"srcset-microsyntax-unique-descriptors-1x-and-omitted": "<img srcset='x 1x, y' src=x alt>",
"srcset-microsyntax-unique-descriptors-2x": "<img srcset='x 2x, y 2x' src=x alt>",
"srcset-microsyntax-unique-descriptors-integer-and-decimals-x": "<img srcset='x 1x, y 1.0x' src=x alt>",
"srcset-microsyntax-unique-descriptors-w": "<img srcset='x 1w, y 1w' sizes=100vw src=x alt>",
"srcset-microsyntax-empty": "<img srcset='' src=x alt>",
"srcset-microsyntax-comma": "<img srcset=',' src=x alt>",
"srcset-microsyntax-css-comment-after-descriptor": "<img srcset='x 2x/**/' src=x alt>",
# aria
"picture-aria-role-img": "<picture role=img><img src=x alt></picture>",
"picture-aria-role-button": "<picture role=button><img src=x alt></picture>",
"picture-aria-role-region": "<picture role=region><img src=x alt></picture>",
"picture-aria-role-application": "<picture role=application><img src=x alt></picture>",
"source-aria-role-img": "<picture><source role=img srcset=x><img src=x alt></picture>",
"picture-aria-role-presentation": "<picture role=presentation><img src=x alt></picture>",
"source-aria-role-presentation": "<picture><source role=presentation srcset=x><img src=x alt></picture>",
}
non_errors_in_head = {
"parent-template-in-head": "<template><picture><img src=x alt></picture></template>",
}
non_errors = {
# basic
"basic-img-src": "<img src=x alt>",
"basic-picture-img-src": "<picture><img src=x alt></picture>",
"basic-picture-source": "<picture><source srcset=x><img src=x alt></picture>",
# inter-element whitespace
"inter-element-whitespace": "<picture> <!--x--> <source srcset=x> <!--x--> <img src=x alt> <!--x--> </picture>",
# parents
"parent-p": "<p><picture><img src=x alt></picture></p>",
"parent-h1": "<h1><picture><img src=x alt=x></picture></h1>",
"parent-noscript-in-body": "<noscript><picture><img src=x alt></picture></noscript>",
"parent-object": "<object data=x><picture><img src=x alt></picture></object>",
"parent-video": "<video src=x><picture><img src=x alt></picture></video>",
"parent-section": "<section><h2>x</h2><picture><img src=x alt></picture></section>",
"parent-main": "<main><picture><img src=x alt></picture></main>",
"parent-canvas": "<canvas><picture><img src=x alt></picture></canvas>",
"parent-template-in-body": "<template><picture><img src=x alt></picture></template>",
"parent-ruby": "<ruby><picture><img src=x alt></picture><rt>x</rt></ruby>",
"parent-rt": "<ruby>x<rt><picture><img src=x alt></picture></rt></ruby>",
"parent-rp": "<ruby>x<rp><picture><img src=x alt></picture></rp><rt>x</rt><rp>x</rp></ruby>",
"parent-a": "<a href=x><picture><img src=x alt></picture></a>",
"parent-button": "<button><picture><img src=x alt></picture></button>",
"parent-td": "<table><tr><td><picture><img src=x alt></picture></table>",
# script-supporting elements
"script-first": "<picture><script></script><source srcset=x><img src=x alt></picture>",
"template-first": "<picture><template></template><source srcset=x><img src=x alt></picture>",
"script-between": "<picture><source srcset=x><script></script><img src=x alt></picture>",
"script-after": "<picture><source srcset=x><img src=x alt><script></script></picture>",
"script-before-after": "<picture><script></script><source srcset=x><img src=x alt><script></script></picture>",
"script-before-between-after": "<picture><script></script><source srcset=x><script></script><img src=x alt><script></script></picture>",
"script-and-template": "<picture><template></template><source srcset=x><script></script><img src=x alt><template></template></picture>",
# source with following sibling source element or img element with a srcset attribute
"source-with-media-img-with-srcset": "<picture><source srcset=x media=screen><img src=x srcset=x alt></picture>",
"source-with-media-uppercase-img-with-srcset": "<picture><source srcset=x media=SCREEN><img src=x srcset=x alt></picture>",
"source-with-media-spaces-img-with-srcset": "<picture><source srcset=x media=' \n\tscreen \n\t'><img src=x srcset=x alt></picture>",
"source-with-media-source-with-srcset": "<picture><source srcset=x media=screen><source srcset=x><img src=x alt></picture>",
"source-with-type-img-with-srcset": "<picture><source srcset=x type=image/gif><img src=x srcset=x alt></picture>",
"source-with-type-source-with-srcset": "<picture><source srcset=x type=image/gif><source srcset=x><img src=x alt></picture>",
# sizes present
"img-with-sizes": "<img srcset='x 100w, y 200w' sizes=50vw src=x alt>",
"source-with-sizes": "<picture><source srcset='x 100w, y 200w' sizes=50vw><img src=x alt></picture>",
# embed allows any attributes
"embed-srcset-empty": "<embed srcset>",
"embed-srcset-junk": "<embed srcset='foo bar'>",
"embed-sizes-empty": "<embed sizes>",
"embed-sizes-junk": "<embed sizes='foo bar'>",
# img src also in srcset
"img-src-also-in-srcset-1x": "<img src=x srcset='x 1x, y 2x' alt>",
"img-src-also-in-srcset-2x": "<img src=x srcset='y 1x, x 2x' alt>",
"img-src-also-in-srcset-w": "<img src=x srcset='x 100w, y 200w' sizes=100vw alt>",
# img src not in srcset
"img-src-not-in-srcset-x": "<img src=x srcset='y 1x, z 2x' alt>",
"img-src-not-in-srcset-w": "<img src=x srcset='y 100w, z 200w' sizes=100vw alt>",
# source type
"source-type": "<picture><source srcset=x type=image/gif><img src=x alt></picture>",
"source-type-srcset-x": "<picture><source srcset='x 1x, y 2x' type=image/gif><img src=x alt></picture>",
"source-type-srcset-w-sizes": "<picture><source srcset='x 100w, y 200w' type=image/gif sizes=50vw><img src=x alt></picture>",
# sizes microsyntax
"sizes-microsyntax-media-min-width": "<img sizes='(min-width:500px) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-multiple-source-sizes": "<img sizes='(min-width:1500px) 500px, (min-width:1000px) 33vw, (min-width:500px) 50vw, 100vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-no-default": "<img sizes='(min-width:500px) 500px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-media-not-and": "<img sizes='not (width:500px) and (width:500px) 500px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-only-default": "<img sizes='500px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-calc-in-default": "<img sizes='calc(500px)' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-calc-in-source-size-value": "<img sizes='(min-width:500px) calc(500px)' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-calc-in-media": "<img sizes='(min-width:calc(500px)) 500px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-zero": "<img sizes='0' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-minus-zero": "<img sizes='-0' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-em-in-source-size-value": "<img sizes='1em' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-ex-in-source-size-value": "<img sizes='1ex' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-ch-in-source-size-value": "<img sizes='1ch' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-rem-in-source-size-value": "<img sizes='1rem' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-vw-in-source-size-value": "<img sizes='1vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-vh-in-source-size-value": "<img sizes='1vh' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-vmin-in-source-size-value": "<img sizes='1vmin' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-vmax-in-source-size-value": "<img sizes='1vmax' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-cm-in-source-size-value": "<img sizes='1cm' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-mm-in-source-size-value": "<img sizes='1mm' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-q-in-source-size-value": "<img sizes='1q' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-in-in-source-size-value": "<img sizes='1in' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-pc-in-source-size-value": "<img sizes='1pc' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-pt-in-source-size-value": "<img sizes='1pt' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-px-in-source-size-value": "<img sizes='1px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-non-integer-px-in-source-size-value": "<img sizes='0.2px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-leading-css-comment": "<img sizes='/**/50vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-trailing-css-comment": "<img sizes='50vw/**/' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-plus": "<img sizes='+50vw' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-non-integer-omitted-zero": "<img sizes='.2px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientifi-notation-0": "<img sizes='-0e-0px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientifi-notation-1": "<img sizes='+11.11e+11px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientifi-notation-2": "<img sizes='2.2e2px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientifi-notation-3": "<img sizes='33E33px' srcset='x 100w, y 200w' src=x alt>",
"sizes-microsyntax-scientifi-notation-4": "<img sizes='.4E4px' srcset='x 100w, y 200w' src=x alt>",
# srcset microsyntax
"srcset-microsyntax-comma-in-url": "<img srcset='x,x' src=x alt>",
"srcset-microsyntax-percent-escaped-leading-comma-in-url": "<img srcset='%2Cx' src=x alt>",
"srcset-microsyntax-percent-escaped-trailing-comma-in-url": "<img srcset='x%2C' src=x alt>",
"srcset-microsyntax-percent-escaped-space-in-url": "<img srcset='%20' src=x alt>",
"srcset-microsyntax-w": "<img srcset='x 1w' sizes=100vw src=x alt>",
"srcset-microsyntax-x": "<img srcset='x 1x' src=x alt>",
"srcset-microsyntax-non-integer-x": "<img srcset='x 1.5x' src=x alt>",
"srcset-microsyntax-scientific-notation-x": "<img srcset='x 1e0x' src=x alt>",
"srcset-microsyntax-scientific-notation-decimals-x": "<img srcset='x 1.5e0x' src=x alt>",
"srcset-microsyntax-scientific-notation-e-plus-x": "<img srcset='x 1e+0x' src=x alt>",
"srcset-microsyntax-scientific-notation-e-minus-x": "<img srcset='x 1e-0x' src=x alt>",
"srcset-microsyntax-scientific-notation-e-uppercase-x": "<img srcset='x 1E0x' src=x alt>",
"srcset-microsyntax-no-space-between-candidates": "<img srcset='x 1x,y 2x' src=x alt>",
# valid attributes on img in picture
"img-crossorigin-with-picture": "<picture><img crossorigin src=x alt></picture>",
"img-usemap-with-picture": "<picture><img usemap=#x src=x alt></picture><map name=x></map>",
"img-ismap-with-picture": "<a href=x><picture><img ismap src=x alt></picture></a>",
"img-width-height-with-picture": "<picture><img src=x alt width=1 height=1></picture>",
"img-width-height-zero-with-picture": "<picture><img src=x alt width=0 height=0></picture>",
# global attributes on picture
"picture-global-attributes": "<picture title=x class=x dir=ltr hidden id=asdf tabindex=0><img src=x alt></picture>",
}
# Emit one standalone *-novalid.html document per invalid test case, then a
# single picture-isvalid.html containing every valid case.  Using 'with'
# guarantees each file handle is closed even if a write fails, and avoids
# shadowing the 'file' builtin as the original code did.
for key in errors:
    # Each invalid case gets its own document so the checker flags exactly one error.
    invalid_doc = template + '<title>invalid %s</title>\n' % key + errors[key]
    with open(os.path.join(ccdir, "html/elements/picture/%s-novalid.html" % key), 'wb') as out:
        out.write(invalid_doc)
with open(os.path.join(ccdir, "html/elements/picture/picture-isvalid.html"), 'wb') as out:
    out.write(template + '<title>valid picture</title>\n')
    # Head-only cases must precede <body>; the rest go after it.
    for key in non_errors_in_head:
        out.write('%s <!-- %s -->\n' % (non_errors_in_head[key], key))
    out.write('<body>\n')
    for key in non_errors:
        out.write('%s <!-- %s -->\n' % (non_errors[key], key))
# vim: ts=4:sw=4
| mpl-2.0 |
andykimpe/chromium-test-npapi | tools/telemetry/telemetry/core/backends/form_based_credentials_backend.py | 43 | 3281 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import util
def _WaitForLoginFormToLoad(backend, login_form_id, tab):
  """Blocks until the login form appears in |tab| or the user is logged in."""
  presence_js = 'document.querySelector("#%s")!== null' % login_form_id
  def _FormPresentOrLoggedIn():
    form_present = tab.EvaluateJavaScript(presence_js)
    return form_present or backend.IsAlreadyLoggedIn(tab)
  # Poll for up to 60 seconds.
  util.WaitFor(_FormPresentOrLoggedIn, 60)
def _SubmitFormAndWait(form_id, tab):
  """Submits form |form_id| in |tab| and waits for it to leave the DOM."""
  tab.ExecuteJavaScript(
      'document.getElementById("%s").submit();' % form_id)
  def _FormGone():
    still_present = tab.EvaluateJavaScript(
        'document.querySelector("#%s") !== null' % form_id)
    return not still_present
  # The form disappearing is the signal that submission navigated away.
  util.WaitFor(_FormGone, 60)
class FormBasedCredentialsBackend(object):
  """Base class for credentials backends that log in through an HTML form.

  Subclasses describe the site (url, form id, input names); this class
  drives the shared navigate / fill / submit flow in LoginNeeded().
  """
  def __init__(self):
    # Tracks whether a login performed by this backend is still in effect.
    self._logged_in = False
  def IsAlreadyLoggedIn(self, tab):
    """Returns whether the site loaded in |tab| already has a session."""
    raise NotImplementedError()
  @property
  def credentials_type(self):
    """String key identifying this credential type in the config."""
    raise NotImplementedError()
  @property
  def url(self):
    """Default login-page URL, used when the config has no 'url' entry."""
    raise NotImplementedError()
  @property
  def login_form_id(self):
    """DOM id of the login <form> element."""
    raise NotImplementedError()
  @property
  def login_input_id(self):
    """Name of the username input inside the login form."""
    raise NotImplementedError()
  @property
  def password_input_id(self):
    """Name of the password input inside the login form."""
    raise NotImplementedError()
  def IsLoggedIn(self):
    """Returns whether this backend believes it is currently logged in."""
    return self._logged_in
  def _ResetLoggedInState(self):
    """Makes the backend think we're not logged in even though we are.
    Should only be used in unit tests to simulate --dont-override-profile.
    """
    self._logged_in = False
  def LoginNeeded(self, tab, config):
    """Logs in to a test account.

    Args:
      tab: browser tab used to drive the login page.
      config: dict with 'username' and 'password' keys, optionally 'url'.
    Returns:
      True on success (or if already logged in), False on timeout.
    Raises:
      RuntimeError: if could not get credential information.
    """
    if self._logged_in:
      return True
    if 'username' not in config or 'password' not in config:
      message = ('Credentials for "%s" must include username and password.' %
                 self.credentials_type)
      raise RuntimeError(message)
    logging.debug('Logging into %s account...' % self.credentials_type)
    if 'url' in config:
      url = config['url']
    else:
      url = self.url
    try:
      logging.info('Loading %s...', url)
      tab.Navigate(url)
      _WaitForLoginFormToLoad(self, self.login_form_id, tab)
      # Navigation may land directly on an authenticated page.
      if self.IsAlreadyLoggedIn(tab):
        self._logged_in = True
        return True
      tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
      logging.info('Loaded page: %s', url)
      # Fill the inputs via JS; the inputs are accessed as named members of
      # the form element (form.<input name>.value).
      email_id = 'document.querySelector("#%s").%s.value = "%s"; ' % (
          self.login_form_id, self.login_input_id, config['username'])
      password = 'document.querySelector("#%s").%s.value = "%s"; ' % (
          self.login_form_id, self.password_input_id, config['password'])
      tab.ExecuteJavaScript(email_id)
      tab.ExecuteJavaScript(password)
      _SubmitFormAndWait(self.login_form_id, tab)
      self._logged_in = True
      return True
    except util.TimeoutException:
      logging.warning('Timed out while loading: %s', url)
      return False
  def LoginNoLongerNeeded(self, tab): # pylint: disable=W0613
    """Marks the session as reusable; only valid after a successful login."""
    assert self._logged_in
| bsd-3-clause |
PengFeiLi/ns3 | .waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Logs.py | 9 | 5484 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
# Wrap the standard streams in an AnsiTerm so color escape sequences work on
# terminals that need translation (notably Windows consoles).  Skipped when
# NOSYNC is set or when the stream was already redirected/replaced.
if not os.environ.get('NOSYNC',False):
	if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
		sys.stdout=ansiterm.AnsiTerm(sys.stdout)
	if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
		sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
# Log record layout; both formats can be overridden from the environment.
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=''	# debug zones enabled (consulted by log_filter; '*' means all)
verbose=0	# verbosity level (consulted by log_filter/debug/error)
# ANSI escape sequences per color name; the 'USE' entry gates all of them.
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
# Carriage-return + erase-line prefix, for transient progress output.
indicator='\r\x1b[K%s%s%s'
# Python 3 has no 'unicode' builtin; None is used below as a py2/py3 marker.
try:
	unicode
except NameError:
	unicode=None
def enable_colors(use):
	"""Enable or disable colored output (0=off, 1=auto-detect, 2=force).

	In auto mode colors are switched off when neither stream is a tty or
	when TERM looks like a dumb terminal.
	"""
	if use==1:
		if not(sys.stderr.isatty()or sys.stdout.isatty()):
			use=0
		# On win32 an unset TERM keeps colors on; elsewhere it defaults to 'dumb'.
		if Utils.is_win32 and os.name!='java':
			term=os.environ.get('TERM','')
		else:
			term=os.environ.get('TERM','dumb')
		if term in('dumb','emacs'):
			use=0
	if use>=1:
		os.environ['TERM']='vt100'
	colors_lst['USE']=use
# Prefer the real terminal-width query from ansiterm; fall back to a fixed 80.
try:
	get_term_cols=ansiterm.get_term_cols
except AttributeError:
	def get_term_cols():
		return 80
get_term_cols.__doc__="""
Get the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
	"""Return the escape sequence for color name *cl*, or '' when colors are off."""
	return colors_lst.get(cl,'') if colors_lst['USE'] else ''
class color_dict(object):
	"""Attribute-style and call-style access to color codes (colors.RED, colors('RED'))."""
	def __getattr__(self,a):
		return get_color(a)
	def __call__(self,a):
		return get_color(a)
colors=color_dict()
# Matches an optional "zone: message" prefix on debug records.
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
	"""Filter implementing waf's debug-zone system.

	INFO and above always pass.  DEBUG records are parsed for a leading
	"zone: message" prefix and only pass when their zone is enabled, or
	when the verbosity is high enough.
	"""
	def __init__(self,name=None):
		pass
	def filter(self,rec):
		# Default zone is the module that emitted the record.
		rec.zone=rec.module
		if rec.levelno>=logging.INFO:
			return True
		m=re_log.match(rec.msg)
		if m:
			# Strip the "zone: " prefix and keep the zone for matching.
			rec.zone=m.group(1)
			rec.msg=m.group(2)
		if zones:
			return getattr(rec,'zone','')in zones or'*'in zones
		elif not verbose>2:
			# Without explicit zones, debug output requires -vvv.
			return False
		return True
class log_handler(logging.StreamHandler):
	"""StreamHandler that routes each record to stdout or stderr.

	WARNING and above go to stderr, everything else to stdout, unless the
	record carries an explicit 'stream' attribute.
	"""
	def emit(self,record):
		try:
			try:
				self.stream=record.stream
			except AttributeError:
				# No stream forced on the record: pick one from the severity.
				if record.levelno>=logging.WARNING:
					record.stream=self.stream=sys.stderr
				else:
					record.stream=self.stream=sys.stdout
			self.emit_override(record)
			self.flush()
		except(KeyboardInterrupt,SystemExit):
			raise
		except:
			self.handleError(record)
	def emit_override(self,record,**kw):
		# Records may request a custom terminator (e.g. '' for progress lines).
		self.terminator=getattr(record,'terminator','\n')
		stream=self.stream
		if unicode:
			# Python 2 path: encode unicode messages to the stream encoding,
			# falling back to UTF-8 when that fails.
			msg=self.formatter.format(record)
			fs='%s'+self.terminator
			try:
				if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
					fs=fs.decode(stream.encoding)
					try:
						stream.write(fs%msg)
					except UnicodeEncodeError:
						stream.write((fs%msg).encode(stream.encoding))
				else:
					stream.write(fs%msg)
			except UnicodeError:
				stream.write((fs%msg).encode("UTF-8"))
		else:
			# Python 3 path: the stdlib handler already does the right thing.
			logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
	"""Formatter that colorizes records, or strips escape codes for non-ttys."""
	def __init__(self):
		logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
	def format(self,rec):
		# Messages may arrive as UTF-8 bytes (py2); decode when possible.
		try:
			msg=rec.msg.decode('utf-8')
		except Exception:
			msg=rec.msg
		use=colors_lst['USE']
		if(use==1 and rec.stream.isatty())or use==2:
			c1=getattr(rec,'c1',None)
			if c1 is None:
				c1=''
				# Default color follows the record severity.
				if rec.levelno>=logging.ERROR:
					c1=colors.RED
				elif rec.levelno>=logging.WARNING:
					c1=colors.YELLOW
				elif rec.levelno>=logging.INFO:
					c1=colors.GREEN
			c2=getattr(rec,'c2',colors.NORMAL)
			msg='%s%s%s'%(c1,msg,c2)
		else:
			# Colors disabled: normalize \r and strip ANSI escape sequences.
			msg=msg.replace('\r','\n')
			msg=re.sub(r'\x1B\[(K|.*?(m|h|l))','',msg)
		# INFO and above are emitted bare; DEBUG gets the full LOG_FORMAT layout.
		if rec.levelno>=logging.INFO:
			return msg
		rec.msg=msg
		rec.c1=colors.PINK
		rec.c2=colors.NORMAL
		return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
	"""Log a debug message with newlines flattened; no-op unless verbose."""
	if not verbose:
		return
	args=list(k)
	args[0]=args[0].replace('\n',' ')
	log.debug(*args,**kw)
def error(*k,**kw):
	"""Log an error; with -vvv also print the call stack that produced it."""
	global log
	log.error(*k,**kw)
	if verbose>2:
		st=traceback.extract_stack()
		if st:
			st=st[:-1]	# drop the frame of error() itself
			buf=[]
			for filename,lineno,name,line in st:
				buf.append(' File "%s", line %d, in %s'%(filename,lineno,name))
				if line:
					buf.append(' %s'%line.strip())
			if buf:log.error("\n".join(buf))
def warn(*k,**kw):
	"""Forward a warning message to the waf logger."""
	log.warn(*k,**kw)
def info(*k,**kw):
	"""Forward an informational message to the waf logger."""
	log.info(*k,**kw)
def init_log():
	"""(Re)initialize the global 'waflib' logger with waf's handler and filter."""
	global log
	log=logging.getLogger('waflib')
	# Clear any handlers/filters left over from a previous initialization.
	log.handlers=[]
	log.filters=[]
	hdlr=log_handler()
	hdlr.setFormatter(formatter())
	log.addHandler(hdlr)
	log.addFilter(log_filter())
	log.setLevel(logging.DEBUG)
def make_logger(path,name):
	"""Create and return a DEBUG-level logger writing raw messages to *path*."""
	file_logger=logging.getLogger(name)
	handler=logging.FileHandler(path,'w')
	handler.setFormatter(logging.Formatter('%(message)s'))
	file_logger.addHandler(handler)
	file_logger.setLevel(logging.DEBUG)
	return file_logger
def make_mem_logger(name,to_log,size=8192):
	"""Create a DEBUG-level logger buffering up to *size* records in memory.

	The buffer is flushed to the *to_log* handler; the MemoryHandler is
	exposed as the 'memhandler' attribute of the returned logger.
	"""
	from logging.handlers import MemoryHandler
	mem_logger=logging.getLogger(name)
	handler=MemoryHandler(size,target=to_log)
	handler.setFormatter(logging.Formatter('%(message)s'))
	mem_logger.addHandler(handler)
	mem_logger.memhandler=handler
	mem_logger.setLevel(logging.DEBUG)
	return mem_logger
def free_logger(logger):
	"""Close and detach every handler of *logger*, ignoring any error.

	Iterates over a copy of the handler list: the original code iterated
	the live list while calling removeHandler() on it, which skipped every
	other handler and leaked the associated streams.
	"""
	try:
		for x in list(logger.handlers):
			x.close()
			logger.removeHandler(x)
	except Exception:
		pass
def pprint(col,msg,label='',sep='\n'):
	"""Print *msg* in color *col* followed by *label*, via the info logger."""
	text="%s%s%s %s"%(colors(col),msg,colors.NORMAL,label)
	info(text,extra={'terminator':sep})
| gpl-2.0 |
panzer13/shadowsocks | tests/coverage_server.py | 1072 | 1655 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
    import tornado.ioloop
    import tornado.web
    # Python 2 only: urllib.quote moved to urllib.parse.quote in Python 3.
    import urllib
    class MainHandler(tornado.web.RequestHandler):
        """Serves a shields.io coverage-badge redirect for a project name."""
        def get(self, project):
            # Coverage values (e.g. "85%") are dropped into /tmp by CI jobs;
            # a missing file means an unknown project -> 404.
            try:
                with open('/tmp/%s-coverage' % project, 'rb') as f:
                    coverage = f.read().strip()
                # Badge turns green at >= 80% coverage, yellow below.
                n = int(coverage.strip('%'))
                if n >= 80:
                    color = 'brightgreen'
                else:
                    color = 'yellow'
                self.redirect(('https://img.shields.io/badge/'
                               'coverage-%s-%s.svg'
                               '?style=flat') %
                              (urllib.quote(coverage), color))
            except IOError:
                raise tornado.web.HTTPError(404)
    application = tornado.web.Application([
        (r"/([a-zA-Z0-9\-_]+)", MainHandler),
    ])
    # NOTE(review): redundant inner guard (already inside __main__); harmless.
    if __name__ == "__main__":
        application.listen(8888, address='127.0.0.1')
        tornado.ioloop.IOLoop.instance().start()
| apache-2.0 |
deeprave/melbdjango-project | contacts/migrations/0002_auto_20150908_1414.py | 2 | 1413 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: widens/relaxes Address fields and
    constrains Contact.title to a fixed set of honorific choices."""
    dependencies = [
        ('contacts', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='address',
            name='address_1',
            field=models.CharField(max_length=1022),
        ),
        migrations.AlterField(
            model_name='address',
            name='address_2',
            field=models.CharField(max_length=1022, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='address',
            name='contact_name',
            field=models.TextField(max_length=1022, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='address',
            name='title',
            field=models.CharField(max_length=254, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='contact',
            name='title',
            field=models.CharField(choices=[(None, 'None'), ('Mr.', 'MR'), ('Miss', 'MISS'), ('Ms.', 'MS'), ('Mrs.', 'MRS'), ('Dr.', 'DR'), ('Prof.', 'PROF'), ('Rev.', 'REV'), ('Lt.', 'LT'), ('Cpt.', 'CPT'), ('Maj.', 'MAJ'), ('Gen.', 'GEN'), ('Esq', 'ESQ'), ('Sr.', 'SR'), ('Jr.', 'JR'), ('Hon.', 'HON'), ('Rt. Hon.', 'RT HON')], max_length=16, null=True, blank=True),
        ),
    ]
| cc0-1.0 |
pombredanne/djorm-ext-pgfulltext | djorm_pgfulltext/tests/models.py | 6 | 1674 | # -*- coding: utf-8 -*-
from django.db import models
from ..fields import VectorField
from ..models import SearchManager
class Person(models.Model):
    """Model whose full-text index is refreshed manually in save()."""
    name = models.CharField(max_length=32)
    description = models.TextField()
    # tsvector column populated from name/description by the manager.
    search_index = VectorField()
    objects = SearchManager(
        fields=('name', 'description'),
        search_field = 'search_index',
        config = 'names',
    )
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        """Save, then refresh search_index (no auto_update on this manager)."""
        super(Person, self).save(*args, **kwargs)
        self.update_search_field()
class Person2(models.Model):
    """Variant of Person indexing (field, weight) pairs; the index is not
    refreshed on save()."""
    name = models.CharField(max_length=32)
    description = models.TextField()
    search_index = VectorField()
    objects = SearchManager(
        # 'A'/'B' are presumably tsvector weights — verify against SearchManager.
        fields=(('name', 'A'), ('description', 'B')),
        search_field = 'search_index',
        config = 'names',
    )
    def __unicode__(self):
        return self.name
class Person3(models.Model):
    """Variant of Person relying on the manager's automatic index update
    (auto_update_search_field=True) instead of overriding save()."""
    name = models.CharField(max_length=32)
    description = models.TextField()
    search_index = VectorField()
    objects = SearchManager(
        fields=('name', 'description'),
        search_field = 'search_index',
        auto_update_search_field = True,
        config = 'names'
    )
    def __unicode__(self):
        return self.name
class Book(models.Model):
    """Book owned by a Person; its name is full-text indexed automatically."""
    author = models.ForeignKey(Person)
    name = models.CharField(max_length=32)
    search_index = VectorField()
    objects = SearchManager(
        fields=('name',),
        search_field = 'search_index',
        auto_update_search_field = True,
        config = 'names'
    )
    def __unicode__(self):
        return self.name
| bsd-3-clause |
s20121035/rk3288_android5.1_repo | external/chromium_org/build/android/pylib/utils/apk_helper.py | 52 | 2208 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing utilities for apk packages."""
import os.path
import re
from pylib import cmd_helper
from pylib import constants
# Path to the aapt binary shipped with the Android SDK.
_AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')
# Matches attribute lines of `aapt dump xmltree` output, capturing the
# attribute name and its quoted raw value.
_MANIFEST_ATTRIBUTE_RE = re.compile(
    r'\s*A: ([^\(\)= ]*)\([^\(\)= ]*\)="(.*)" \(Raw: .*\)$')
# Matches element ("E:") and namespace ("N:") lines, capturing the tag name.
_MANIFEST_ELEMENT_RE = re.compile(r'\s*(?:E|N): (\S*) .*$')
def GetPackageName(apk_path):
  """Returns the package name of the apk."""
  badging_re = re.compile(r'package: .*name=\'(\S*)\'')
  output = cmd_helper.GetCmdOutput([_AAPT_PATH, 'dump', 'badging', apk_path])
  for line in output.split('\n'):
    match = badging_re.match(line)
    if match:
      return match.group(1)
  raise Exception('Failed to determine package name of %s' % apk_path)
def _ParseManifestFromApk(apk_path):
  """Parses `aapt dump xmltree ... AndroidManifest.xml` output into nested dicts.

  Elements become dicts keyed by tag name; attributes become lists of raw
  string values.  Tree depth is recovered from the leading indentation of
  each aapt output line.
  """
  aapt_cmd = [_AAPT_PATH, 'dump', 'xmltree', apk_path, 'AndroidManifest.xml']
  aapt_output = cmd_helper.GetCmdOutput(aapt_cmd).split('\n')
  parsed_manifest = {}
  # Stack of dicts from the root down to the current element.
  node_stack = [parsed_manifest]
  indent = ' '
  for line in aapt_output[1:]:
    if len(line) == 0:
      continue
    # Depth == number of leading indent units on the line.
    indent_depth = 0
    while line[(len(indent) * indent_depth):].startswith(indent):
      indent_depth += 1
    # Pop back to the parent that matches this depth.
    node_stack = node_stack[:indent_depth]
    node = node_stack[-1]
    m = _MANIFEST_ELEMENT_RE.match(line[len(indent) * indent_depth:])
    if m:
      # New element: attach a child dict and descend into it.
      if not m.group(1) in node:
        node[m.group(1)] = {}
      node_stack += [node[m.group(1)]]
      continue
    m = _MANIFEST_ATTRIBUTE_RE.match(line[len(indent) * indent_depth:])
    if m:
      # Attribute of the current element: values accumulate in a list.
      if not m.group(1) in node:
        node[m.group(1)] = []
      node[m.group(1)].append(m.group(2))
      continue
  return parsed_manifest
def GetInstrumentationName(
    apk_path, default='android.test.InstrumentationTestRunner'):
  """Returns the name of the Instrumentation in the apk, or *default*."""
  try:
    parsed = _ParseManifestFromApk(apk_path)
    instrumentation_node = parsed['manifest']['instrumentation']
    return instrumentation_node['android:name'][0]
  except KeyError:
    # Manifest declares no instrumentation: fall back to the stock runner.
    return default
| gpl-3.0 |
eunchong/build | third_party/twisted_10_2/twisted/conch/ttymodes.py | 64 | 2249 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
import tty
# this module was autogenerated.
VINTR = 1
VQUIT = 2
VERASE = 3
VKILL = 4
VEOF = 5
VEOL = 6
VEOL2 = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VWERASE = 13
VLNEXT = 14
VFLUSH = 15
VSWTCH = 16
VSTATUS = 17
VDISCARD = 18
IGNPAR = 30
PARMRK = 31
INPCK = 32
ISTRIP = 33
INLCR = 34
IGNCR = 35
ICRNL = 36
IUCLC = 37
IXON = 38
IXANY = 39
IXOFF = 40
IMAXBEL = 41
ISIG = 50
ICANON = 51
XCASE = 52
ECHO = 53
ECHOE = 54
ECHOK = 55
ECHONL = 56
NOFLSH = 57
TOSTOP = 58
IEXTEN = 59
ECHOCTL = 60
ECHOKE = 61
PENDIN = 62
OPOST = 70
OLCUC = 71
ONLCR = 72
OCRNL = 73
ONOCR = 74
ONLRET = 75
CS7 = 90
CS8 = 91
PARENB = 92
PARODD = 93
TTY_OP_ISPEED = 128
TTY_OP_OSPEED = 129
TTYMODES = {
1 : 'VINTR',
2 : 'VQUIT',
3 : 'VERASE',
4 : 'VKILL',
5 : 'VEOF',
6 : 'VEOL',
7 : 'VEOL2',
8 : 'VSTART',
9 : 'VSTOP',
10 : 'VSUSP',
11 : 'VDSUSP',
12 : 'VREPRINT',
13 : 'VWERASE',
14 : 'VLNEXT',
15 : 'VFLUSH',
16 : 'VSWTCH',
17 : 'VSTATUS',
18 : 'VDISCARD',
30 : (tty.IFLAG, 'IGNPAR'),
31 : (tty.IFLAG, 'PARMRK'),
32 : (tty.IFLAG, 'INPCK'),
33 : (tty.IFLAG, 'ISTRIP'),
34 : (tty.IFLAG, 'INLCR'),
35 : (tty.IFLAG, 'IGNCR'),
36 : (tty.IFLAG, 'ICRNL'),
37 : (tty.IFLAG, 'IUCLC'),
38 : (tty.IFLAG, 'IXON'),
39 : (tty.IFLAG, 'IXANY'),
40 : (tty.IFLAG, 'IXOFF'),
41 : (tty.IFLAG, 'IMAXBEL'),
50 : (tty.LFLAG, 'ISIG'),
51 : (tty.LFLAG, 'ICANON'),
52 : (tty.LFLAG, 'XCASE'),
53 : (tty.LFLAG, 'ECHO'),
54 : (tty.LFLAG, 'ECHOE'),
55 : (tty.LFLAG, 'ECHOK'),
56 : (tty.LFLAG, 'ECHONL'),
57 : (tty.LFLAG, 'NOFLSH'),
58 : (tty.LFLAG, 'TOSTOP'),
59 : (tty.LFLAG, 'IEXTEN'),
60 : (tty.LFLAG, 'ECHOCTL'),
61 : (tty.LFLAG, 'ECHOKE'),
62 : (tty.LFLAG, 'PENDIN'),
70 : (tty.OFLAG, 'OPOST'),
71 : (tty.OFLAG, 'OLCUC'),
72 : (tty.OFLAG, 'ONLCR'),
73 : (tty.OFLAG, 'OCRNL'),
74 : (tty.OFLAG, 'ONOCR'),
75 : (tty.OFLAG, 'ONLRET'),
# 90 : (tty.CFLAG, 'CS7'),
# 91 : (tty.CFLAG, 'CS8'),
92 : (tty.CFLAG, 'PARENB'),
93 : (tty.CFLAG, 'PARODD'),
128 : 'ISPEED',
129 : 'OSPEED'
}
| bsd-3-clause |
greasypizza/grpc | tools/distrib/python/grpcio_tools/protoc_lib_deps.py | 14 | 10228 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED BY make_grpcio_tools.py!
CC_FILES=['google/protobuf/compiler/zip_writer.cc', 'google/protobuf/compiler/subprocess.cc', 'google/protobuf/compiler/ruby/ruby_generator.cc', 'google/protobuf/compiler/python/python_generator.cc', 'google/protobuf/compiler/plugin.pb.cc', 'google/protobuf/compiler/plugin.cc', 'google/protobuf/compiler/php/php_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_primitive_field.cc', 'google/protobuf/compiler/objectivec/objectivec_oneof.cc', 'google/protobuf/compiler/objectivec/objectivec_message_field.cc', 'google/protobuf/compiler/objectivec/objectivec_message.cc', 'google/protobuf/compiler/objectivec/objectivec_map_field.cc', 'google/protobuf/compiler/objectivec/objectivec_helpers.cc', 'google/protobuf/compiler/objectivec/objectivec_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_file.cc', 'google/protobuf/compiler/objectivec/objectivec_field.cc', 'google/protobuf/compiler/objectivec/objectivec_extension.cc', 'google/protobuf/compiler/objectivec/objectivec_enum_field.cc', 'google/protobuf/compiler/objectivec/objectivec_enum.cc', 'google/protobuf/compiler/js/js_generator.cc', 'google/protobuf/compiler/javanano/javanano_primitive_field.cc', 'google/protobuf/compiler/javanano/javanano_message_field.cc', 'google/protobuf/compiler/javanano/javanano_message.cc', 'google/protobuf/compiler/javanano/javanano_map_field.cc', 'google/protobuf/compiler/javanano/javanano_helpers.cc', 'google/protobuf/compiler/javanano/javanano_generator.cc', 'google/protobuf/compiler/javanano/javanano_file.cc', 'google/protobuf/compiler/javanano/javanano_field.cc', 'google/protobuf/compiler/javanano/javanano_extension.cc', 'google/protobuf/compiler/javanano/javanano_enum_field.cc', 'google/protobuf/compiler/javanano/javanano_enum.cc', 'google/protobuf/compiler/java/java_string_field_lite.cc', 'google/protobuf/compiler/java/java_string_field.cc', 'google/protobuf/compiler/java/java_shared_code_generator.cc', 'google/protobuf/compiler/java/java_service.cc', 
'google/protobuf/compiler/java/java_primitive_field_lite.cc', 'google/protobuf/compiler/java/java_primitive_field.cc', 'google/protobuf/compiler/java/java_name_resolver.cc', 'google/protobuf/compiler/java/java_message_lite.cc', 'google/protobuf/compiler/java/java_message_field_lite.cc', 'google/protobuf/compiler/java/java_message_field.cc', 'google/protobuf/compiler/java/java_message_builder_lite.cc', 'google/protobuf/compiler/java/java_message_builder.cc', 'google/protobuf/compiler/java/java_message.cc', 'google/protobuf/compiler/java/java_map_field_lite.cc', 'google/protobuf/compiler/java/java_map_field.cc', 'google/protobuf/compiler/java/java_lazy_message_field_lite.cc', 'google/protobuf/compiler/java/java_lazy_message_field.cc', 'google/protobuf/compiler/java/java_helpers.cc', 'google/protobuf/compiler/java/java_generator_factory.cc', 'google/protobuf/compiler/java/java_generator.cc', 'google/protobuf/compiler/java/java_file.cc', 'google/protobuf/compiler/java/java_field.cc', 'google/protobuf/compiler/java/java_extension_lite.cc', 'google/protobuf/compiler/java/java_extension.cc', 'google/protobuf/compiler/java/java_enum_lite.cc', 'google/protobuf/compiler/java/java_enum_field_lite.cc', 'google/protobuf/compiler/java/java_enum_field.cc', 'google/protobuf/compiler/java/java_enum.cc', 'google/protobuf/compiler/java/java_doc_comment.cc', 'google/protobuf/compiler/java/java_context.cc', 'google/protobuf/compiler/csharp/csharp_wrapper_field.cc', 'google/protobuf/compiler/csharp/csharp_source_generator_base.cc', 'google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_message_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_reflection_class.cc', 'google/protobuf/compiler/csharp/csharp_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_message_field.cc', 'google/protobuf/compiler/csharp/csharp_message.cc', 
'google/protobuf/compiler/csharp/csharp_map_field.cc', 'google/protobuf/compiler/csharp/csharp_helpers.cc', 'google/protobuf/compiler/csharp/csharp_generator.cc', 'google/protobuf/compiler/csharp/csharp_field_base.cc', 'google/protobuf/compiler/csharp/csharp_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_enum.cc', 'google/protobuf/compiler/csharp/csharp_doc_comment.cc', 'google/protobuf/compiler/cpp/cpp_string_field.cc', 'google/protobuf/compiler/cpp/cpp_service.cc', 'google/protobuf/compiler/cpp/cpp_primitive_field.cc', 'google/protobuf/compiler/cpp/cpp_message_field.cc', 'google/protobuf/compiler/cpp/cpp_message.cc', 'google/protobuf/compiler/cpp/cpp_map_field.cc', 'google/protobuf/compiler/cpp/cpp_helpers.cc', 'google/protobuf/compiler/cpp/cpp_generator.cc', 'google/protobuf/compiler/cpp/cpp_file.cc', 'google/protobuf/compiler/cpp/cpp_field.cc', 'google/protobuf/compiler/cpp/cpp_extension.cc', 'google/protobuf/compiler/cpp/cpp_enum_field.cc', 'google/protobuf/compiler/cpp/cpp_enum.cc', 'google/protobuf/compiler/command_line_interface.cc', 'google/protobuf/compiler/code_generator.cc', 'google/protobuf/wrappers.pb.cc', 'google/protobuf/wire_format.cc', 'google/protobuf/util/type_resolver_util.cc', 'google/protobuf/util/time_util.cc', 'google/protobuf/util/message_differencer.cc', 'google/protobuf/util/json_util.cc', 'google/protobuf/util/internal/utility.cc', 'google/protobuf/util/internal/type_info_test_helper.cc', 'google/protobuf/util/internal/type_info.cc', 'google/protobuf/util/internal/protostream_objectwriter.cc', 'google/protobuf/util/internal/protostream_objectsource.cc', 'google/protobuf/util/internal/proto_writer.cc', 'google/protobuf/util/internal/object_writer.cc', 'google/protobuf/util/internal/json_stream_parser.cc', 'google/protobuf/util/internal/json_objectwriter.cc', 'google/protobuf/util/internal/json_escaping.cc', 'google/protobuf/util/internal/field_mask_utility.cc', 'google/protobuf/util/internal/error_listener.cc', 
'google/protobuf/util/internal/default_value_objectwriter.cc', 'google/protobuf/util/internal/datapiece.cc', 'google/protobuf/util/field_mask_util.cc', 'google/protobuf/util/field_comparator.cc', 'google/protobuf/unknown_field_set.cc', 'google/protobuf/type.pb.cc', 'google/protobuf/timestamp.pb.cc', 'google/protobuf/text_format.cc', 'google/protobuf/stubs/substitute.cc', 'google/protobuf/stubs/mathlimits.cc', 'google/protobuf/struct.pb.cc', 'google/protobuf/source_context.pb.cc', 'google/protobuf/service.cc', 'google/protobuf/reflection_ops.cc', 'google/protobuf/message.cc', 'google/protobuf/map_field.cc', 'google/protobuf/io/zero_copy_stream_impl.cc', 'google/protobuf/io/tokenizer.cc', 'google/protobuf/io/strtod.cc', 'google/protobuf/io/printer.cc', 'google/protobuf/io/gzip_stream.cc', 'google/protobuf/generated_message_reflection.cc', 'google/protobuf/field_mask.pb.cc', 'google/protobuf/extension_set_heavy.cc', 'google/protobuf/empty.pb.cc', 'google/protobuf/dynamic_message.cc', 'google/protobuf/duration.pb.cc', 'google/protobuf/descriptor_database.cc', 'google/protobuf/descriptor.pb.cc', 'google/protobuf/descriptor.cc', 'google/protobuf/compiler/parser.cc', 'google/protobuf/compiler/importer.cc', 'google/protobuf/api.pb.cc', 'google/protobuf/any.pb.cc', 'google/protobuf/any.cc', 'google/protobuf/wire_format_lite.cc', 'google/protobuf/stubs/time.cc', 'google/protobuf/stubs/strutil.cc', 'google/protobuf/stubs/structurally_valid.cc', 'google/protobuf/stubs/stringprintf.cc', 'google/protobuf/stubs/stringpiece.cc', 'google/protobuf/stubs/statusor.cc', 'google/protobuf/stubs/status.cc', 'google/protobuf/stubs/once.cc', 'google/protobuf/stubs/int128.cc', 'google/protobuf/stubs/common.cc', 'google/protobuf/stubs/bytestream.cc', 'google/protobuf/stubs/atomicops_internals_x86_msvc.cc', 'google/protobuf/stubs/atomicops_internals_x86_gcc.cc', 'google/protobuf/repeated_field.cc', 'google/protobuf/message_lite.cc', 'google/protobuf/io/zero_copy_stream_impl_lite.cc', 
'google/protobuf/io/zero_copy_stream.cc', 'google/protobuf/io/coded_stream.cc', 'google/protobuf/generated_message_util.cc', 'google/protobuf/extension_set.cc', 'google/protobuf/arenastring.cc', 'google/protobuf/arena.cc']
PROTO_FILES=['google/protobuf/wrappers.proto', 'google/protobuf/type.proto', 'google/protobuf/timestamp.proto', 'google/protobuf/struct.proto', 'google/protobuf/source_context.proto', 'google/protobuf/field_mask.proto', 'google/protobuf/empty.proto', 'google/protobuf/duration.proto', 'google/protobuf/descriptor.proto', 'google/protobuf/compiler/plugin.proto', 'google/protobuf/api.proto', 'google/protobuf/any.proto']
# Include roots (relative to the grpcio-tools package) for the vendored
# protobuf C++ sources and .proto files listed above.
CC_INCLUDE='third_party/protobuf/src'
PROTO_INCLUDE='third_party/protobuf/src'
| bsd-3-clause |
mathiasertl/django-xmpp-server-test | xmpp_server_test/server_test/xmpp/clients.py | 1 | 15044 | # -*- coding: utf-8 -*-
#
# This file is part of django-xmpp-server-test
# (https://github.com/mathiasertl/django-xmpp-server-test).
#
# django-xmpp-server-test is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# django-xmpp-server-test is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# django-xmpp-server-test. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
from django.conf import settings
from sleekxmpp.basexmpp import BaseXMPP
from sleekxmpp.clientxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError
from sleekxmpp.plugins.base import load_plugin
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import MatchXPath
#from .plugins import amp
#from .plugins import auth
#from .plugins import bind
from .plugins import caps
from .plugins import compression
from .plugins import csi
from .plugins import dialback
from .plugins import register
from .plugins import rosterver
#from .plugins import session
from .plugins import sm
log = logging.getLogger(__name__)
# Maps a XEP number to the disco#info feature namespace that indicates the
# server supports that XEP (consumed by StreamFeatureClient.test_xep0030).
_feature_mappings = {
    '0012': 'jabber:iq:last',
    '0016': 'jabber:iq:privacy',
    '0039': 'http://jabber.org/protocol/stats',
    '0049': 'jabber:iq:private',
    '0050': 'http://jabber.org/protocol/commands',
    '0054': 'vcard-temp',
    '0060': 'http://jabber.org/protocol/pubsub',
    '0090': 'jabber:iq:time',
    '0160': 'msgoffline',
    '0191': 'urn:xmpp:blocking',
    '0199': 'urn:xmpp:ping',
    '0202': 'urn:xmpp:time',
    '0280': 'urn:xmpp:carbons:2',
    '0313': 'urn:xmpp:mam:0',
}
class StreamFeatureMixin(object):
    """Helpers shared by client and server probes.

    Each handler pops one entry out of ``self._stream_features`` (the
    stanzas collected during stream negotiation) and records the outcome
    in ``self.test.data``.
    """

    def handle_stream_feature(self, xep, feature, kind='status', default=False):
        """Record under ``xeps[xep][kind]`` whether *feature* was advertised.

        With the default ``default=False`` the stored value is a plain
        bool; any other *default* is used as fallback when the feature
        stanza is absent.
        """
        stanza = self._stream_features.pop(feature, None)
        present = stanza is not None
        if default is not False:
            present = present or default
        self.test.data['xeps'][xep][kind] = present

    def handle_tls_stream_feature(self, kind):
        """Record the STARTTLS offer as False / 'optional' / 'required'."""
        stanza = self._stream_features.pop('starttls', None)
        if stanza is None:
            result = False
        elif stanza.find('{%s}required' % stanza.namespace) is None:
            result = 'optional'
        else:
            result = 'required'
        self.test.data['core']['tls'][kind] = result

    def handle_compression_stream_feature(self, kind):
        """Record the list of advertised stream-compression methods."""
        stanza = self._stream_features.pop('compression', None)
        methods = []
        if stanza is not None:
            methods = [node.text
                       for node in stanza.findall('{%s}method' % stanza.namespace)]
        self.test.data['xeps']['0138'][kind] = methods
class StreamFeatureClient(ClientXMPP, StreamFeatureMixin):
    """Probe a server's client-to-server (c2s) stream.

    Connects once, records advertised stream features and SASL mechanisms,
    runs service-discovery based XEP checks, writes everything into
    ``self.test.data`` and then disconnects.
    """

    def __init__(self, test, *args, **kwargs):
        super(StreamFeatureClient, self).__init__(*args, **kwargs)
        self.use_ipv6 = settings.USE_IP6
        self.auto_reconnect = False
        self.test = test  # result object: handlers fill test.data, test.save() persists
        # disable the stock rosterver plugina
        registered_features = {f: p for p, f in self._stream_feature_order}
        for feature in ['rosterver']:
            if feature in registered_features:
                self.unregister_feature(feature, registered_features[feature])
        # self.unregister_feature('bind', 10000)
        # self.unregister_feature('session', 10001)
        self.replace_plugin('feature_rosterver', rosterver)
        # self.replace_plugin('feature_bind', bind)
        # self.replace_plugin('feature_session', session)
        # register additional known plugins
        self.register_plugin('feature_caps', module=caps)
        self.register_plugin('feature_compression', module=compression)
        self.register_plugin('feature_register', module=register)
        self.register_plugin('feature_sm', module=sm)
        self.register_plugin('feature_csi', module=csi)
        # register various xep plugins
        self.register_plugin('xep_0030')  # service discovery
        self.register_plugin('xep_0092')  # software version
        self.add_event_handler('stream_negotiated', self._stream_negotiated)
        self.add_event_handler("session_start", self.session_start)
        self.add_event_handler("failed_auth", self.failed_auth)
        # feature name -> stanza, populated by _handle_stream_features
        self._stream_features = {}

    def replace_plugin(self, name, module):
        """Swap an already-registered sleekxmpp plugin for our own module."""
        self.plugin.disable(name)
        load_plugin(name, module)
        self.plugin.enable(name)

    def failed_auth(self, *args, **kwargs):
        """Record failed authentication, persist the result and stop."""
        self.test.data['authenticated'] = False
        self.test.save()
        self.disconnect();

    def process_stream_features(self):
        """Translate the collected stream features into test.data entries."""
        # Process basic core features (stanza is present -> server supports it)
        self.test.data['core']['session']['status'] = bool(
            self._stream_features.pop('session', False))
        self.test.data['core']['bind']['status'] = bool(
            self._stream_features.pop('bind', False))
        self.handle_tls_stream_feature('client')
        self.handle_compression_stream_feature('client')
        # Process SASL authentication mechanisms
        sasl_stanza = self._stream_features.pop('mechanisms', None)
        if sasl_stanza is not None:
            self.test.data['core']['sasl']['status'] = True
            algos = [n.text for n in sasl_stanza.findall('{%s}mechanism' % sasl_stanza.namespace)]
            self.test.data['core']['sasl']['algorithms'] = algos
        else:
            self.test.data['core']['sasl']['status'] = False
        # process XEPs
        self.handle_stream_feature('0077', 'register')
        self.handle_stream_feature('0078', 'status')
        # xep 0079 may be discoverable via service discovery
        self.handle_stream_feature('0079', 'amp', default=None)
        self.handle_stream_feature('0115', 'c', kind='client')
        self.handle_stream_feature('0198', 'sm')
        self.handle_stream_feature('0237', 'ver')
        self.handle_stream_feature('0352', 'csi')
        if self._stream_features:
            log.error('Unprocessed stream features: %s', sorted(self._stream_features))

    def test_xep0030(self):  # XEP-0030: Service Discovery
        """Run disco#info against the server and map features to XEPs."""
        try:
            info = self['xep_0030'].get_info(jid=self.boundjid.domain, ifrom=self.boundjid.full)
            self.test.data['xeps']['0030']['status'] = True
            features = info.values['disco_info']['features']
            if 'jabber:iq:version' in features:
                # server claims XEP-0092 support; verify with a real query
                features.remove('jabber:iq:version')
                self.test_xep0092()
            else:
                self.test.data['xeps']['0092']['status'] = False
            # generic XEPs
            for xep, feature in _feature_mappings.items():
                self.test.data['xeps'][xep]['status'] = feature in features
                self.test.save()
                if feature in features:
                    features.remove(feature)
            # remove any additional pubsub namespaces
            features = [f for f in features if not
                        f.startswith('http://jabber.org/protocol/pubsub')]
            # remove disco items
            ignore_features = [
                'http://jabber.org/protocol/disco#info',
                'http://jabber.org/protocol/disco#items',
                'jabber:iq:register',
            ]
            features = [f for f in features if f not in ignore_features]
            if features:
                log.error('Unhandled disco features: %s', sorted(features))
        except IqError as e:
            if e.condition != 'feature-not-implemented':
                log.error('[XEP-0030]: Unhandled condition: %s', e.condition)
            self.test.data['xeps']['0030']['status'] = False
            self.test.data['xeps']['0030']['condition'] = e.condition

    def test_xep0092(self):  # XEP-0092: Software Version
        """Query the server's software version and store it as a note."""
        try:
            version = self['xep_0092'].get_version(self.boundjid.domain, ifrom=self.boundjid.full)
            if version['type'] == 'result':
                self.test.data['xeps']['0092']['status'] = True
                data = version.values['software_version']
                # assemble a string displayed in the notes field
                note = ''
                if 'name' in data:
                    note += data['name']
                if 'version' in data:
                    note += ' %s' % data['version']
                if 'os' in data:
                    note += ' (%s)' % data['os']
                self.test.data['xeps']['0092']['notes'] = note
            else:
                log.error('[XEP-0092]: Received IQ stanza of type "%s".', version['type'])
                self.test.data['xeps']['0092']['status'] = False
        except IqError as e:
            if e.condition == 'feature-not-implemented':
                self.test.data['xeps']['0092']['status'] = 'no'
            else:
                log.error('[XEP-0092]: Unhandled condition: %s', e.condition)
                self.test.data['xeps']['0092']['status'] = False
                self.test.data['xeps']['0092']['condition'] = e.condition
        except Exception as e:
            log.error("[XEP-0092] %s: %s", type(e).__name__, e)
            self.test.data['xeps']['0092']['status'] = False

    def _handle_stream_features(self, features):
        """Collect incoming stream features.
        This method is invoked multiple times if (e.g. after starttls) the stream is renegotiated.
        """
        if 'starttls' in features['features']:  # Reset stream features after starttls
            self._stream_features = {
                'starttls': features['starttls'],
            }
        else:  # New stream features encountered
            self._stream_features.update(features.get_features())
        # compute list of unhandled stream features
        found_tags = set([re.match('{.*}(.*)', n.tag).groups(1)[0]
                          for n in features.xml.getchildren()])
        unhandled = found_tags - set(features.get_features().keys())
        if unhandled:
            log.error("Unhandled stream features: %s", sorted(unhandled))
        return super(StreamFeatureClient, self)._handle_stream_features(features)

    def _stream_negotiated(self, *args, **kwargs):
        """Once the stream is up, evaluate features, run disco tests, quit."""
        self.process_stream_features()
        self.test_xep0030()
        self.disconnect()

    def session_start(self, event):
        # nothing to do here; all checks run from _stream_negotiated
        pass
class StreamFeatureServer(BaseXMPP, StreamFeatureMixin):
    """Probe a server's server-to-server (s2s) stream.

    Opens an s2s connection and records the stream features the peer
    advertises (TLS, compression, dialback, bidi, entity caps) into
    ``self.test.data``.
    """

    def __init__(self, test, jid, lang='en'):
        super(StreamFeatureServer, self).__init__(jid, default_ns='jabber:server')
        self.test = test
        self.use_ipv6 = settings.USE_IP6
        self.auto_reconnect = False
        # NOTE(review): duplicate assignment — self.test was already set above
        self.test = test
        # feature name -> stanza, populated by _handle_stream_features
        self._stream_features = {}
        # adapted from ClientXMPP
        self.default_port = 5269
        self.default_lang = lang
        self.stream_header = "<stream:stream to='%s' %s %s %s %s>" % (
            self.boundjid.host,
            "xmlns:stream='%s'" % self.stream_ns,
            "xmlns='%s'" % self.default_ns,
            "xml:lang='%s'" % self.default_lang,
            "version='1.0'")
        self.stream_footer = "</stream:stream>"
        self.features = set()
        self._stream_feature_handlers = {}
        self._stream_feature_order = []
        self.dns_service = 'xmpp-server'
        self.register_stanza(StreamFeatures)
        self.register_handler(
            Callback('Stream Features',
                     MatchXPath('{%s}features' % self.stream_ns),
                     self._handle_stream_features))
        self.register_plugin('feature_starttls')
        self.register_plugin('feature_dialback', module=dialback)
        self.register_plugin('feature_sm', module=sm)
        self.add_event_handler('stream_negotiated', self._stream_negotiated)

    def connect(self, address=tuple(), reattempt=True,
                use_tls=True, use_ssl=False):
        """Adapted from ClientXMPP.
        When no address is given, a SRV lookup for the server will
        be attempted. If that fails, the server user in the JID
        will be used.
        :param address: A tuple containing the server's host and port.
        :param reattempt: If ``True``, repeat attempting to connect if an
                          error occurs. Defaults to ``True``.
        :param use_tls: Indicates if TLS should be used for the
                        connection. Defaults to ``True``.
        :param use_ssl: Indicates if the older SSL connection method
                        should be used. Defaults to ``False``.
        """
        self.session_started_event.clear()
        # If an address was provided, disable using DNS SRV lookup;
        # otherwise, use the domain from the client JID with the standard
        # XMPP client port and allow SRV lookup.
        if address:
            self.dns_service = None
        else:
            address = (self.boundjid.host, 5269)
            self.dns_service = 'xmpp-server'
        return super(StreamFeatureServer, self).connect(address[0], address[1], use_tls=use_tls,
                                                        use_ssl=use_ssl, reattempt=reattempt)

    def _handle_stream_features(self, features):
        """Collect incoming stream features (same logic as the c2s client)."""
        if 'starttls' in features['features']:  # Reset stream features after starttls
            self._stream_features = {
                'starttls': features['starttls'],
            }
        else:  # New stream features encountered
            self._stream_features.update(features.get_features())
        # compute list of unhandled stream features
        found_tags = set([re.match('{.*}(.*)', n.tag).groups(1)[0]
                          for n in features.xml.getchildren()])
        unhandled = found_tags - set(features.get_features().keys())
        if unhandled:
            log.error("Unhandled stream features: %s", sorted(unhandled))
        return ClientXMPP._handle_stream_features(self, features)

    def process_stream_features(self):
        """Translate the collected s2s stream features into test.data."""
        self.handle_tls_stream_feature('server')
        self.handle_compression_stream_feature('server')
        self.handle_stream_feature('0115', 'c', kind='server')
        self.test.data['xeps']['0220']['status'] = bool(
            self._stream_features.pop('dialback', False))
        self.test.data['xeps']['0288']['status'] = bool(self._stream_features.pop('bidi', False))
        if self._stream_features:
            log.error('Unprocessed stream features: %s', sorted(self._stream_features))

    def _stream_negotiated(self, *args, **kwargs):
        """Evaluate features once the stream is negotiated, then close."""
        self.process_stream_features()
        self.disconnect()

    # reuse ClientXMPP's feature registration on this BaseXMPP subclass
    register_feature = ClientXMPP.register_feature
| gpl-3.0 |
ashvina/heron | heron/tools/common/src/python/utils/config.py | 1 | 16671 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''config.py: util functions for config, mainly for heron-cli'''
import argparse
import contextlib
import getpass
import os
import sys
import subprocess
import tarfile
import tempfile
import yaml
from heron.common.src.python.utils.log import Log
# pylint: disable=logging-not-lazy
# default environ tag, if not provided
ENVIRON = "default"
# directories for heron distribution
BIN_DIR = "bin"
CONF_DIR = "conf"
ETC_DIR = "etc"
LIB_DIR = "lib"
CLI_DIR = ".heron"
RELEASE_YAML = "release.yaml"
ZIPPED_RELEASE_YAML = "scripts/packages/release.yaml"
OVERRIDE_YAML = "override.yaml"
# mode of deployment
DIRECT_MODE = 'direct'
SERVER_MODE = 'server'
# directories for heron sandbox (relative path used inside the tarball)
SANDBOX_CONF_DIR = "./heron-conf"
# config file for heron cli
CLIENT_YAML = "client.yaml"
# client configs for role and env for direct deployment
ROLE_REQUIRED = "heron.config.is.role.required"
ENV_REQUIRED = "heron.config.is.env.required"
# client config for role and env for server deployment
ROLE_KEY = "role.required"
ENVIRON_KEY = "env.required"
def create_tar(tar_filename, files, config_dir, config_files):
    '''
    Bundle the given files plus a config directory into a gzipped tar.

    Plain files land at the archive root; ``config_dir`` and the extra
    ``config_files`` land under the sandbox conf directory. Raises if any
    input path does not exist.
    '''
    def _require_file(path):
        # every plain file we add must already exist on disk
        if not os.path.isfile(path):
            raise Exception("%s is not an existing file" % path)

    with contextlib.closing(tarfile.open(tar_filename, 'w:gz', dereference=True)) as tar:
        for path in files:
            _require_file(path)
            tar.add(path, arcname=os.path.basename(path))
        if not os.path.isdir(config_dir):
            raise Exception("%s is not an existing directory" % config_dir)
        tar.add(config_dir, arcname=get_heron_sandbox_conf_dir())
        for path in config_files:
            _require_file(path)
            tar.add(path,
                    arcname=os.path.join(get_heron_sandbox_conf_dir(), os.path.basename(path)))
def get_subparser(parser, command):
    '''
    Return the subparser registered under ``command``, or None if absent.
    '''
    # pylint: disable=protected-access
    for action in parser._actions:
        # argparse keeps subcommands inside private _SubParsersAction objects
        if not isinstance(action, argparse._SubParsersAction):
            continue
        for name, sub in action.choices.items():
            if name == command:
                return sub
    return None
def cygpath(x):
    '''
    Normalize a class path on cygwin by delegating to the ``cygpath`` tool.

    :param x: path (or path list) to convert
    :return: the converted path printed by ``cygpath -wp`` (first line)
    '''
    command = ['cygpath', '-wp', x]
    # universal_newlines makes communicate() return text on Python 3 as well;
    # without it the output is bytes and splitting on "\n" raises TypeError
    p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
    output, _ = p.communicate()
    # cygpath prints the converted path followed by a trailing newline
    return output.split("\n")[0]
def identity(x):
    '''
    Return ``x`` unchanged (the no-op path normalization).
    '''
    return x
def normalized_class_path(x):
    '''
    Normalize a path; only cygwin needs the Windows-style translation.
    '''
    converter = cygpath if sys.platform == 'cygwin' else identity
    return converter(x)
def get_classpath(jars):
    '''
    Join the normalized paths of all jars into a colon-separated classpath.
    '''
    return ':'.join(normalized_class_path(jar) for jar in jars)
def get_heron_dir():
    """
    Locate the heron install root containing this .pex file.

    This module sits 9 path components below the root, e.g. when the real
    path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc'
    the root is '/Users/heron-user/.heron'; so strip that many trailing
    components from the module's real path.
    :return: root location of the .pex file
    """
    components = os.path.realpath(__file__).split('/')
    root = "/".join(components[:-9])
    return normalized_class_path(root)
def get_zipped_heron_dir():
    """
    Locate the root PEX directory for a .pex built with `zip_safe = False'.

    This module sits 7 path components below that root, e.g. when the real
    path is '/Users/heron-user/.pex/code/xxxyyy/heron/tools/common/src/python/utils/config.pyc'
    the root is '/Users/heron-user/.pex/code/xxxyyy/'; strip that many
    trailing components from the module's real path.
    :return: root location of the .pex file.
    """
    components = os.path.realpath(__file__).split('/')
    return normalized_class_path("/".join(components[:-7]))
################################################################################
# Get the root of heron dir and various sub directories depending on platform
################################################################################
def get_heron_bin_dir():
    """
    Provide the absolute path of the heron bin directory.
    :return: absolute path of heron bin directory
    """
    return os.path.join(get_heron_dir(), BIN_DIR)
def get_heron_conf_dir():
    """
    Provide the absolute path of the heron conf directory.
    :return: absolute path of heron conf directory
    """
    return os.path.join(get_heron_dir(), CONF_DIR)
def get_heron_lib_dir():
    """
    Provide the absolute path of the heron lib directory.
    :return: absolute path of heron lib directory
    """
    return os.path.join(get_heron_dir(), LIB_DIR)
def get_heron_release_file():
    """
    Provide the absolute path of heron's release.yaml file.
    :return: absolute path of heron release.yaml file
    """
    return os.path.join(get_heron_dir(), RELEASE_YAML)
def get_zipped_heron_release_file():
    """
    Provide the path to release.yaml for a .pex built with `zip_safe = False`
    (for example `heron-ui').
    :return: absolute path of heron release.yaml file
    """
    return os.path.join(get_zipped_heron_dir(), ZIPPED_RELEASE_YAML)
def get_heron_cluster_conf_dir(cluster, default_config_path):
    """
    Provide the per-cluster conf directory under the default config path.
    :return: absolute path of heron cluster conf directory
    """
    return os.path.join(default_config_path, cluster)
def get_heron_sandbox_conf_dir():
    """
    Provide the heron conf directory inside the sandbox.
    :return: relative path of heron sandbox conf directory
    """
    return SANDBOX_CONF_DIR
def get_heron_libs(local_jars):
    """Return absolute paths for the given heron lib jars."""
    lib_dir = get_heron_lib_dir()
    return [os.path.join(lib_dir, jar) for jar in local_jars]
def get_heron_cluster(cluster_role_env):
    """Return the cluster component of a ``cluster/role/env`` string."""
    cluster, _, _ = cluster_role_env.partition('/')
    return cluster
################################################################################
# pylint: disable=too-many-branches,superfluous-parens
def parse_cluster_role_env(cluster_role_env, config_path):
    """Parse cluster/[role]/[environ], supplying defaults where allowed.

    Missing role defaults to the current user and missing environ to
    ENVIRON, unless client.yaml in ``config_path`` marks them required,
    in which case an Exception is raised.

    :param cluster_role_env: 'cluster[/role[/env]]' string
    :param config_path: directory expected to contain client.yaml
    :return: (cluster, role, environ) tuple
    :raises Exception: on a missing config dir or a missing required part
    """
    parts = cluster_role_env.split('/')[:3]
    if not os.path.isdir(config_path):
        Log.error("Config path cluster directory does not exist: %s" % config_path)
        raise Exception("Invalid config path")
    # if cluster/role/env is not completely provided, check further
    if len(parts) < 3:
        cli_conf_file = os.path.join(config_path, CLIENT_YAML)
        # if client conf doesn't exist, use default value
        if not os.path.isfile(cli_conf_file):
            if len(parts) == 1:
                parts.append(getpass.getuser())
            if len(parts) == 2:
                parts.append(ENVIRON)
        else:
            cli_confs = {}
            with open(cli_conf_file, 'r') as conf_file:
                # safe_load: the config is plain key/value data; the bare
                # yaml.load call is deprecated and can execute python tags
                tmp_confs = yaml.safe_load(conf_file)
                # the return value can be None if conf_file is an empty file
                if tmp_confs is not None:
                    cli_confs = tmp_confs
                else:
                    print("Failed to read: %s due to it is empty" % (CLIENT_YAML))
            # if role is required but not provided, raise exception
            if len(parts) == 1:
                if (ROLE_REQUIRED in cli_confs) and (cli_confs[ROLE_REQUIRED] is True):
                    raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
                                    % (cluster_role_env, ROLE_REQUIRED, cli_conf_file))
                else:
                    parts.append(getpass.getuser())
            # if environ is required but not provided, raise exception
            if len(parts) == 2:
                if (ENV_REQUIRED in cli_confs) and (cli_confs[ENV_REQUIRED] is True):
                    raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
                                    % (cluster_role_env, ENV_REQUIRED, cli_conf_file))
                else:
                    parts.append(ENVIRON)
    # if cluster or role or environ is empty, print
    if len(parts[0]) == 0 or len(parts[1]) == 0 or len(parts[2]) == 0:
        print("Failed to parse")
        sys.exit(1)
    return (parts[0], parts[1], parts[2])
################################################################################
def get_cluster_role_env(cluster_role_env):
    """Parse cluster/[role]/[environ], filling missing parts with empty strings."""
    parts = cluster_role_env.split('/')[:3]
    # pad to exactly three components
    padded = parts + [""] * (3 - len(parts))
    return (padded[0], padded[1], padded[2])
################################################################################
def direct_mode_cluster_role_env(cluster_role_env, config_path):
    """Check that role/environ were supplied when client.yaml requires them.

    :param cluster_role_env: (cluster, role, environ) tuple
    :param config_path: directory that may contain client.yaml
    :return: True when the tuple satisfies the configured requirements
    :raises Exception: when a required role or environ is missing
    """
    # otherwise, get the client.yaml file
    cli_conf_file = os.path.join(config_path, CLIENT_YAML)
    # if client conf doesn't exist, nothing can be required
    if not os.path.isfile(cli_conf_file):
        return True
    client_confs = {}
    with open(cli_conf_file, 'r') as conf_file:
        # safe_load: plain key/value config; bare yaml.load is deprecated
        # and can execute arbitrary python tags
        client_confs = yaml.safe_load(conf_file)
        # the return value can be None if conf_file is an empty file
        if not client_confs:
            return True
    # if role is required but not provided, raise exception
    role_present = len(cluster_role_env[1]) > 0
    if ROLE_REQUIRED in client_confs and client_confs[ROLE_REQUIRED] and not role_present:
        raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
                        % (cluster_role_env, ROLE_REQUIRED, cli_conf_file))
    # if environ is required but not provided, raise exception
    environ_present = len(cluster_role_env[2]) > 0
    if ENV_REQUIRED in client_confs and client_confs[ENV_REQUIRED] and not environ_present:
        raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
                        % (cluster_role_env, ENV_REQUIRED, cli_conf_file))
    return True
################################################################################
def server_mode_cluster_role_env(cluster_role_env, config_map):
    """Check cluster/[role]/[environ], if they are required"""
    cmap = config_map[cluster_role_env[0]]
    role = cluster_role_env[1]
    environ = cluster_role_env[2]
    # if role is required but not provided, raise exception
    if cmap.get(ROLE_KEY) and len(role) == 0:
        raise Exception("role required but not provided (cluster/role/env = %s)."\
            % (cluster_role_env))
    # if environ is required but not provided, raise exception
    if cmap.get(ENVIRON_KEY) and len(environ) == 0:
        raise Exception("environ required but not provided (cluster/role/env = %s)."\
            % (cluster_role_env))
    return True
################################################################################
def defaults_cluster_role_env(cluster_role_env):
    """
    Fill in defaults when both role and environ are missing:
    the current user as role and 'default' (ENVIRON) as environ.
    """
    cluster, role, environ = cluster_role_env[0], cluster_role_env[1], cluster_role_env[2]
    if not role and not environ:
        return (cluster, getpass.getuser(), ENVIRON)
    return (cluster, role, environ)
################################################################################
# Parse the command line for overriding the defaults
################################################################################
def parse_override_config_and_write_file(namespace):
    """
    Parse the command line for overriding the defaults and
    write the overrides to a temporary YAML file.
    """
    overrides = parse_override_config(namespace)
    try:
        # write the overrides into a fresh temp directory
        target = os.path.join(tempfile.mkdtemp(), OVERRIDE_YAML)
        with open(target, 'w') as out:
            out.write(yaml.dump(overrides))
        return target
    except Exception as e:
        raise Exception("Failed to parse override config: %s" % str(e))
def parse_override_config(namespace):
    """Parse the command line for overriding the defaults.

    Each entry must look like ``key=value``. The value may itself contain
    ``=`` characters (e.g. ``opts=a=b``): only the first ``=`` splits, which
    generalizes the previous exact-two-parts behavior backward-compatibly.

    :param namespace: iterable of ``key=value`` strings
    :return: dict mapping keys to their (string) values
    :raises Exception: when an entry contains no ``=`` separator
    """
    overrides = dict()
    for config in namespace:
        # split only on the first '=' so values may contain '='
        kv = config.split("=", 1)
        if len(kv) != 2:
            raise Exception("Invalid config property format (%s) expected key=value" % config)
        overrides[kv[0]] = kv[1]
    return overrides
def get_java_path():
  """Get the path of java executable.

  Assumes JAVA_HOME is set; callers should invoke check_java_home_set()
  first, because os.environ.get returns None when it is unset and
  os.path.join would then raise TypeError.
  """
  java_home = os.environ.get("JAVA_HOME")
  return os.path.join(java_home, BIN_DIR, "java")
def check_java_home_set():
  """Return True when JAVA_HOME points at an executable java binary.

  Logs an error and returns False when JAVA_HOME is unset, or when
  $JAVA_HOME/bin/java is missing or not executable.
  """
  if "JAVA_HOME" not in os.environ:
    Log.error("JAVA_HOME not set")
    return False
  java_path = get_java_path()
  is_executable = os.path.isfile(java_path) and os.access(java_path, os.X_OK)
  if not is_executable:
    Log.error("JAVA_HOME/bin/java either does not exist or not an executable")
    return False
  return True
def check_release_file_exists():
  """Return True when the heron release.yaml file exists on disk.

  Logs an error and returns False when the file is absent.
  """
  release_file = get_heron_release_file()
  if os.path.isfile(release_file):
    return True
  Log.error("Required file not found: %s" % release_file)
  return False
def print_build_info(zipped_pex=False):
  """Print build_info from release.yaml, sorted by key.

  :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
  """
  if zipped_pex:
    release_file = get_zipped_heron_release_file()
  else:
    release_file = get_heron_release_file()
  with open(release_file) as release_info:
    # safe_load avoids arbitrary Python object construction from the YAML
    # and the PyYAML >= 5.1 deprecation warning for load() without a Loader.
    release_map = yaml.safe_load(release_info)
    release_items = sorted(release_map.items(), key=lambda tup: tup[0])
    for key, value in release_items:
      print("%s : %s" % (key, value))
def get_version_number(zipped_pex=False):
  """Return the heron.build.version value from release.yaml.

  :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
  :return: the version string, or 'unknown' when no version line is found
  """
  if zipped_pex:
    release_file = get_zipped_heron_release_file()
  else:
    release_file = get_heron_release_file()
  with open(release_file) as release_info:
    for line in release_info:
      # Bug fix: strip the newline instead of unconditionally dropping the
      # last character, which corrupted the value when the final line of
      # the file had no trailing newline.
      trunks = line.rstrip('\n').split(' ')
      if trunks[0] == 'heron.build.version':
        return trunks[-1].replace("'", "")
  return 'unknown'
def insert_bool(param, command_args):
  '''Insert the literal string 'True' immediately after `param`.

  Scanning stops at the first bare '--' separator, so occurrences of
  `param` after it are left untouched. The list is modified in place and
  also returned.

  :param param: flag name to look for (e.g. '--verbose')
  :param command_args: list of command-line tokens
  :return: the (possibly modified) command_args list
  '''
  position = None
  for idx, token in enumerate(command_args):
    if token == '--':
      break
    if token == param:
      position = idx
      break
  if position is not None:
    command_args.insert(position + 1, 'True')
  return command_args
def insert_bool_values(command_line_args):
  '''Insert 'True' after each known boolean flag in command_line_args.

  :param command_line_args: list of command-line tokens
  :return: the list with 'True' inserted after each flag that is present
  '''
  args = command_line_args
  for flag in ('--verbose', '--deploy-deactivated'):
    args = insert_bool(flag, args)
  return args
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
  """Help formatter that hides the redundant subcommand metavar line."""
  def _format_action(self, action):
    # pylint: disable=bad-super-call
    # Deliberately skips RawDescriptionHelpFormatter in the MRO so the base
    # HelpFormatter renders the action; for a subparsers action the first
    # output line (the "{cmd1,cmd2,...}" metavar) is then stripped.
    parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
    if action.nargs == argparse.PARSER:
      parts = "\n".join(parts.split("\n")[1:])
    return parts
| apache-2.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/nose/suite.py | 57 | 22341 | """
Test Suites
-----------
Provides a LazySuite, which is a suite whose test list is a generator
function, and ContextSuite,which can run fixtures (setup/teardown
functions or methods) for the context that contains its tests.
"""
from __future__ import generators
import logging
import sys
import unittest
from nose.case import Test
from nose.config import Config
from nose.proxy import ResultProxyFactory
from nose.util import isclass, resolve_name, try_run
# IronPython (sys.platform == 'cli') historically raised string exceptions
# from its runtime; import the real type on versions before 2.6, otherwise
# define a placeholder so isinstance checks in this module stay valid.
if sys.platform == 'cli':
    if sys.version_info[:2] < (2, 6):
        import clr
        clr.AddReference("IronPython")
        from IronPython.Runtime.Exceptions import StringException
    else:
        class StringException(Exception):
            pass
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
# Singleton for default value -- see ContextSuite.__init__ below
_def = object()
def _strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
class MixedContextError(Exception):
    """Error raised when a context suite sees tests from more than
    one context.
    """
    # Raised by ContextSuiteFactory.findContext; the factory responds by
    # building a hierarchy of per-context sub-suites (see mixedSuites).
    pass
class LazySuite(unittest.TestSuite):
    """A suite that may use a generator as its list of tests
    """
    def __init__(self, tests=()):
        """Initialize the suite. tests may be an iterable or a generator
        """
        super(LazySuite, self).__init__()
        self._set_tests(tests)
    def __iter__(self):
        return iter(self._tests)
    def __repr__(self):
        return "<%s tests=generator (%s)>" % (
            _strclass(self.__class__), id(self))
    def __hash__(self):
        return object.__hash__(self)
    __str__ = __repr__
    def addTest(self, test):
        # Tests added after construction go into the precache list, which is
        # yielded ahead of the generator's output (see _get_tests).
        self._precache.append(test)
    # added to bypass run changes in 2.7's unittest
    def run(self, result):
        for test in self._tests:
            if result.shouldStop:
                break
            test(result)
        return result
    def __nonzero__(self):
        # Truth test without exhausting the generator: peek one test and,
        # if found, stash it in the precache so iteration still sees it.
        log.debug("tests in %s?", id(self))
        if self._precache:
            return True
        if self.test_generator is None:
            return False
        try:
            test = self.test_generator.next()
            if test is not None:
                self._precache.append(test)
                return True
        except StopIteration:
            pass
        return False
    def _get_tests(self):
        # Yield cached tests first, then whatever the generator produces.
        log.debug("precache is %s", self._precache)
        for test in self._precache:
            yield test
        if self.test_generator is None:
            return
        for test in self.test_generator:
            yield test
    def _set_tests(self, tests):
        self._precache = []
        is_suite = isinstance(tests, unittest.TestSuite)
        if callable(tests) and not is_suite:
            # A callable is assumed to be a generator function; call it now so
            # _get_tests can iterate the resulting generator lazily.
            self.test_generator = tests()
        elif is_suite:
            # Suites need special treatment: they must be called like
            # tests for their setup/teardown to run (if any)
            self.addTests([tests])
            self.test_generator = None
        else:
            self.addTests(tests)
            self.test_generator = None
    _tests = property(_get_tests, _set_tests, None,
                      "Access the tests in this suite. Access is through a "
                      "generator, so iteration may not be repeatable.")
class ContextSuite(LazySuite):
    """A suite with context.
    A ContextSuite executes fixtures (setup and teardown functions or
    methods) for the context containing its tests.
    The context may be explicitly passed. If it is not, a context (or
    nested set of contexts) will be constructed by examining the tests
    in the suite.
    """
    failureException = unittest.TestCase.failureException
    was_setup = False
    was_torndown = False
    # Recognized fixture-method names, probed on the context object by
    # setupContext/teardownContext via try_run.
    classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
                  'setUpClass', 'setUpAll')
    classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
                     'teardownAll', 'tearDownClass', 'tearDownAll')
    moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
                   'setUp')
    moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
                      'teardown', 'tearDown')
    packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
    packageTeardown = ('teardown_package', 'teardownPackage',
                       'tearDownPackage')
    def __init__(self, tests=(), context=None, factory=None,
                 config=None, resultProxy=None, can_split=True):
        # context: module/class/package owning the tests (may be None);
        # factory: the ContextSuiteFactory tracking shared fixture state.
        log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
        self.context = context
        self.factory = factory
        if config is None:
            config = Config()
        self.config = config
        self.resultProxy = resultProxy
        self.has_run = False
        self.can_split = can_split
        self.error_context = None
        super(ContextSuite, self).__init__(tests)
    def __repr__(self):
        return "<%s context=%s>" % (
            _strclass(self.__class__),
            getattr(self.context, '__name__', self.context))
    __str__ = __repr__
    def id(self):
        # Include the fixture phase ('setup'/'teardown') that errored, if any,
        # so failures are attributed to the right phase in reports.
        if self.error_context:
            return '%s:%s' % (repr(self), self.error_context)
        else:
            return repr(self)
    def __hash__(self):
        return object.__hash__(self)
    # 2.3 compat -- force 2.4 call sequence
    def __call__(self, *arg, **kw):
        return self.run(*arg, **kw)
    def exc_info(self):
        """Hook for replacing error tuple output
        """
        return sys.exc_info()
    def _exc_info(self):
        """Bottleneck to fix up IronPython string exceptions
        """
        e = self.exc_info()
        if sys.platform == 'cli':
            if isinstance(e[0], StringException):
                # IronPython throws these StringExceptions, but
                # traceback checks type(etype) == str. Make a real
                # string here.
                e = (str(e[0]), e[1], e[2])
        return e
    def run(self, result):
        """Run tests in suite inside of suite fixtures.
        """
        # proxy the result for myself
        log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
        #import pdb
        #pdb.set_trace()
        if self.resultProxy:
            result, orig = self.resultProxy(result, self), result
        else:
            result, orig = result, result
        try:
            self.setUp()
        except KeyboardInterrupt:
            raise
        except:
            # A setup failure is reported against the suite itself and the
            # contained tests are not run.
            self.error_context = 'setup'
            result.addError(self, self._exc_info())
            return
        try:
            for test in self._tests:
                if result.shouldStop:
                    log.debug("stopping")
                    break
                # each nose.case.Test will create its own result proxy
                # so the cases need the original result, to avoid proxy
                # chains
                test(orig)
        finally:
            self.has_run = True
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                self.error_context = 'teardown'
                result.addError(self, self._exc_info())
    def hasFixtures(self, ctx_callback=None):
        # True when my context, or any ancestor context recorded by the
        # factory, defines any setup/teardown fixture method.
        context = self.context
        if context is None:
            return False
        if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
            return True
        # My context doesn't have any, but its ancestors might
        factory = self.factory
        if factory:
            ancestors = factory.context.get(self, [])
            for ancestor in ancestors:
                if self.implementsAnyFixture(
                    ancestor, ctx_callback=ctx_callback):
                    return True
        return False
    def implementsAnyFixture(self, context, ctx_callback):
        # Pick the fixture-name set by context kind: class vs module, with
        # package names added when the context has a __path__.
        if isclass(context):
            names = self.classSetup + self.classTeardown
        else:
            names = self.moduleSetup + self.moduleTeardown
            if hasattr(context, '__path__'):
                names += self.packageSetup + self.packageTeardown
        # If my context has any fixture attribute, I have fixtures
        fixt = False
        for m in names:
            if hasattr(context, m):
                fixt = True
                break
        if ctx_callback is None:
            return fixt
        return ctx_callback(context, fixt)
    def setUp(self):
        log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
        if not self:
            # I have no tests
            log.debug("suite %s has no tests", id(self))
            return
        if self.was_setup:
            log.debug("suite %s already set up", id(self))
            return
        context = self.context
        if context is None:
            return
        # before running my own context's setup, I need to
        # ask the factory if my context's contexts' setups have been run
        factory = self.factory
        if factory:
            # get a copy, since we'll be destroying it as we go
            ancestors = factory.context.get(self, [])[:]
            while ancestors:
                ancestor = ancestors.pop()
                log.debug("ancestor %s may need setup", ancestor)
                if ancestor in factory.was_setup:
                    continue
                log.debug("ancestor %s does need setup", ancestor)
                self.setupContext(ancestor)
            if not context in factory.was_setup:
                self.setupContext(context)
        else:
            self.setupContext(context)
        self.was_setup = True
        log.debug("completed suite setup")
    def setupContext(self, context):
        # Run the context's setup fixture(s) once; the factory records which
        # suite did it so the same suite performs the matching teardown.
        self.config.plugins.startContext(context)
        log.debug("%s setup context %s", self, context)
        if self.factory:
            if context in self.factory.was_setup:
                return
            # note that I ran the setup for this context, so that I'll run
            # the teardown in my teardown
            self.factory.was_setup[context] = self
        if isclass(context):
            names = self.classSetup
        else:
            names = self.moduleSetup
            if hasattr(context, '__path__'):
                names = self.packageSetup + names
        try_run(context, names)
    def shortDescription(self):
        if self.context is None:
            return "test suite"
        return "test suite for %s" % self.context
    def tearDown(self):
        log.debug('context teardown')
        if not self.was_setup or self.was_torndown:
            log.debug(
                "No reason to teardown (was_setup? %s was_torndown? %s)"
                % (self.was_setup, self.was_torndown))
            return
        self.was_torndown = True
        context = self.context
        if context is None:
            log.debug("No context to tear down")
            return
        # for each ancestor... if the ancestor was setup
        # and I did the setup, I can do teardown
        factory = self.factory
        if factory:
            ancestors = factory.context.get(self, []) + [context]
            for ancestor in ancestors:
                log.debug('ancestor %s may need teardown', ancestor)
                if not ancestor in factory.was_setup:
                    log.debug('ancestor %s was not setup', ancestor)
                    continue
                if ancestor in factory.was_torndown:
                    log.debug('ancestor %s already torn down', ancestor)
                    continue
                setup = factory.was_setup[ancestor]
                log.debug("%s setup ancestor %s", setup, ancestor)
                if setup is self:
                    self.teardownContext(ancestor)
        else:
            self.teardownContext(context)
    def teardownContext(self, context):
        log.debug("%s teardown context %s", self, context)
        if self.factory:
            if context in self.factory.was_torndown:
                return
            self.factory.was_torndown[context] = self
        if isclass(context):
            names = self.classTeardown
        else:
            names = self.moduleTeardown
            if hasattr(context, '__path__'):
                names = self.packageTeardown + names
        try_run(context, names)
        self.config.plugins.stopContext(context)
    # FIXME the wrapping has to move to the factory?
    def _get_wrapped_tests(self):
        # Wrap raw test cases in nose.case.Test; pass suites and
        # already-wrapped tests through unchanged.
        for test in self._get_tests():
            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
                yield test
            else:
                yield Test(test,
                           config=self.config,
                           resultProxy=self.resultProxy)
    _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
                      "Access the tests in this suite. Tests are returned "
                      "inside of a context wrapper.")
class ContextSuiteFactory(object):
    """Factory for ContextSuites. Called with a collection of tests,
    the factory decides on a hierarchy of contexts by introspecting
    the collection or the tests themselves to find the objects
    containing the test objects. It always returns one suite, but that
    suite may consist of a hierarchy of nested suites.
    """
    suiteClass = ContextSuite
    def __init__(self, config=None, suiteClass=None, resultProxy=_def):
        if config is None:
            config = Config()
        self.config = config
        if suiteClass is not None:
            self.suiteClass = suiteClass
        # Using a singleton to represent default instead of None allows
        # passing resultProxy=None to turn proxying off.
        if resultProxy is _def:
            resultProxy = ResultProxyFactory(config=config)
        self.resultProxy = resultProxy
        # suites: context -> suites under it; context: suite -> contexts;
        # was_setup/was_torndown: context -> suite that ran the fixture.
        self.suites = {}
        self.context = {}
        self.was_setup = {}
        self.was_torndown = {}
    def __call__(self, tests, **kw):
        """Return ``ContextSuite`` for tests. ``tests`` may either
        be a callable (in which case the resulting ContextSuite will
        have no parent context and be evaluated lazily) or an
        iterable. In that case the tests will wrapped in
        nose.case.Test, be examined and the context of each found and a
        suite of suites returned, organized into a stack with the
        outermost suites belonging to the outermost contexts.
        """
        log.debug("Create suite for %s", tests)
        context = kw.pop('context', getattr(tests, 'context', None))
        log.debug("tests %s context %s", tests, context)
        if context is None:
            tests = self.wrapTests(tests)
            try:
                context = self.findContext(tests)
            except MixedContextError:
                return self.makeSuite(self.mixedSuites(tests), None, **kw)
        return self.makeSuite(tests, context, **kw)
    def ancestry(self, context):
        """Return the ancestry of the context (that is, all of the
        packages and modules containing the context), in order of
        descent with the outermost ancestor last.
        This method is a generator.
        """
        log.debug("get ancestry %s", context)
        if context is None:
            return
        # Methods include reference to module they are defined in, we
        # don't want that, instead want the module the class is in now
        # (classes are re-ancestored elsewhere).
        if hasattr(context, 'im_class'):
            context = context.im_class
        elif hasattr(context, '__self__'):
            context = context.__self__.__class__
        if hasattr(context, '__module__'):
            ancestors = context.__module__.split('.')
        elif hasattr(context, '__name__'):
            ancestors = context.__name__.split('.')[:-1]
        else:
            raise TypeError("%s has no ancestors?" % context)
        while ancestors:
            log.debug(" %s ancestors %s", context, ancestors)
            yield resolve_name('.'.join(ancestors))
            ancestors.pop()
    def findContext(self, tests):
        # Return the single context shared by all tests, or None; raise
        # MixedContextError when tests disagree (handled in __call__).
        if callable(tests) or isinstance(tests, unittest.TestSuite):
            return None
        context = None
        for test in tests:
            # Don't look at suites for contexts, only tests
            ctx = getattr(test, 'context', None)
            if ctx is None:
                continue
            if context is None:
                context = ctx
            elif context != ctx:
                raise MixedContextError(
                    "Tests with different contexts in same suite! %s != %s"
                    % (context, ctx))
        return context
    def makeSuite(self, tests, context, **kw):
        # Build a suite and register it under its context and every ancestor
        # context, so fixture bookkeeping can walk the hierarchy later.
        suite = self.suiteClass(
            tests, context=context, config=self.config, factory=self,
            resultProxy=self.resultProxy, **kw)
        if context is not None:
            self.suites.setdefault(context, []).append(suite)
            self.context.setdefault(suite, []).append(context)
            log.debug("suite %s has context %s", suite,
                      getattr(context, '__name__', None))
            for ancestor in self.ancestry(context):
                self.suites.setdefault(ancestor, []).append(suite)
                self.context[suite].append(ancestor)
                log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
        return suite
    def mixedSuites(self, tests):
        """The complex case where there are tests that don't all share
        the same context. Groups tests into suites with common ancestors,
        according to the following (essentially tail-recursive) procedure:
        Starting with the context of the first test, if it is not
        None, look for tests in the remaining tests that share that
        ancestor. If any are found, group into a suite with that
        ancestor as the context, and replace the current suite with
        that suite. Continue this process for each ancestor of the
        first test, until all ancestors have been processed. At this
        point if any tests remain, recurse with those tests as the
        input, returning a list of the common suite (which may be the
        suite or test we started with, if no common tests were found)
        plus the results of recursion.
        """
        if not tests:
            return []
        head = tests.pop(0)
        if not tests:
            return [head] # short circuit when none are left to combine
        suite = head # the common ancestry suite, so far
        tail = tests[:]
        context = getattr(head, 'context', None)
        if context is not None:
            ancestors = [context] + [a for a in self.ancestry(context)]
            for ancestor in ancestors:
                common = [suite] # tests with ancestor in common, so far
                remain = [] # tests that remain to be processed
                for test in tail:
                    found_common = False
                    test_ctx = getattr(test, 'context', None)
                    if test_ctx is None:
                        remain.append(test)
                        continue
                    if test_ctx is ancestor:
                        common.append(test)
                        continue
                    for test_ancestor in self.ancestry(test_ctx):
                        if test_ancestor is ancestor:
                            common.append(test)
                            found_common = True
                            break
                    if not found_common:
                        remain.append(test)
                if common:
                    suite = self.makeSuite(common, ancestor)
                tail = self.mixedSuites(remain)
        return [suite] + tail
    def wrapTests(self, tests):
        # Wrap raw test cases in nose.case.Test; ContextLists become suites
        # with their hinted context; suites/callables pass through.
        log.debug("wrap %s", tests)
        if callable(tests) or isinstance(tests, unittest.TestSuite):
            log.debug("I won't wrap")
            return tests
        wrapped = []
        for test in tests:
            log.debug("wrapping %s", test)
            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
                wrapped.append(test)
            elif isinstance(test, ContextList):
                wrapped.append(self.makeSuite(test, context=test.context))
            else:
                wrapped.append(
                    Test(test, config=self.config, resultProxy=self.resultProxy)
                )
        return wrapped
class ContextList(object):
    """Not quite a suite -- a group of tests in a context. This is used
    to hint the ContextSuiteFactory about what context the tests
    belong to, in cases where it may be ambiguous or missing.
    """
    def __init__(self, tests, context=None):
        # tests: iterable of test cases; context: the module/class they
        # should be treated as belonging to (may be None).
        self.tests = tests
        self.context = context
    def __iter__(self):
        return iter(self.tests)
class FinalizingSuiteWrapper(unittest.TestSuite):
    """Suite wrapper that guarantees a finalize callable runs after the
    wrapped suite, even when the suite raises. Needed when test running
    is not under nose's control (e.g. the standard unittest runner).
    """
    def __init__(self, suite, finalize):
        super(FinalizingSuiteWrapper, self).__init__()
        self.finalize = finalize
        self.suite = suite
    def __call__(self, *arg, **kw):
        return self.run(*arg, **kw)
    # 2.7 compat
    def __iter__(self):
        return iter(self.suite)
    def run(self, *arg, **kw):
        try:
            outcome = self.suite(*arg, **kw)
        finally:
            self.finalize(*arg, **kw)
        return outcome
# backwards compat -- sort of
class TestDir:
    """Backwards-compatibility placeholder; instantiation always fails."""
    def __init__(self, *arg, **kw):
        raise NotImplementedError(
            "TestDir is not usable with nose 0.10. The class is present "
            "in nose.suite for backwards compatibility purposes but it "
            "may not be used.")
class TestModule:
    """Backwards-compatibility placeholder; instantiation always fails."""
    def __init__(self, *arg, **kw):
        raise NotImplementedError(
            "TestModule is not usable with nose 0.10. The class is present "
            "in nose.suite for backwards compatibility purposes but it "
            "may not be used.")
| mit |
OptimalBPM/optimal_file_sync | service/main.py | 1 | 1191 |
__author__ = 'Nicklas Boerjesson'
from plyer import notification
import os
from kivy.utils import platform
platform = platform()  # NOTE(review): rebinds (shadows) the imported kivy callable with its result
if __name__ == '__main__':
    """Initialize the service"""
    try:
        # Import the service main class
        from syncservice import SyncService
    except Exception as e:
        # Import failure is only reported via a desktop notification; the
        # second try below will then fail with a NameError on SyncService.
        notification.notify("Optimal File Sync Service", "Error in init :" + str(e))
    try:
        # Find home dir
        if platform == "android":
            _home = "/storage/emulated/0/Android/data/"
        else:
            _home = os.path.expanduser("~")
        # Check if there is a settings file there
        _config_path = os.path.join(_home, "se.optimalbpm.optimal_file_sync/config.txt")
        # Raise error if non existing config
        if not os.path.exists(_config_path):
            notification.notify("Optimal File Sync Service", "Could not find config: " + _config_path + ", quitting.")
        else:
            # Pass to service
            _service=SyncService(_cfg_file=_config_path)
            _service.start("Default")
    except Exception as e:
        notification.notify("Optimal File Sync Service", "Error finding config :" + str(e))
| apache-2.0 |
JingJunYin/tensorflow | tensorflow/python/kernel_tests/where_op_test.py | 39 | 5579 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
  """Tests for array_ops.where returning coordinates of true elements."""
  def _testWhere(self, x, truth, expected_err_re=None):
    # Run tf.where on x and compare against the expected [num_true, ndim]
    # index matrix `truth`; with expected_err_re set, assert evaluation fails.
    with self.test_session(use_gpu=True):
      ans = array_ops.where(x)
      self.assertEqual([None, x.ndim], ans.get_shape().as_list())
      if expected_err_re is None:
        tf_ans = ans.eval()
        self.assertAllClose(tf_ans, truth, atol=1e-10)
      else:
        with self.assertRaisesOpError(expected_err_re):
          ans.eval()
  def testWrongNumbers(self):
    # where() requires either both x and y or neither.
    with self.test_session(use_gpu=True):
      with self.assertRaises(ValueError):
        array_ops.where([False, True], [1, 2], None)
      with self.assertRaises(ValueError):
        array_ops.where([False, True], None, [1, 2])
  def testBasicVec(self):
    x = np.asarray([True, False])
    truth = np.asarray([[0]], dtype=np.int64)
    self._testWhere(x, truth)
    x = np.asarray([False, True, False])
    truth = np.asarray([[1]], dtype=np.int64)
    self._testWhere(x, truth)
    x = np.asarray([False, False, True, False, True])
    truth = np.asarray([[2], [4]], dtype=np.int64)
    self._testWhere(x, truth)
  def testRandomVec(self):
    x = np.random.rand(1000000) > 0.5
    truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
    self._testWhere(x, truth)
  def testBasicMat(self):
    x = np.asarray([[True, False], [True, False]])
    # Ensure RowMajor mode
    truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
    self._testWhere(x, truth)
  def testBasic3Tensor(self):
    x = np.asarray([[[True, False], [True, False]],
                    [[False, True], [False, True]],
                    [[False, False], [False, True]]])
    # Ensure RowMajor mode
    truth = np.asarray(
        [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
    self._testWhere(x, truth)
  def _testRandom(self, dtype, expected_err_re=None):
    shape = [127, 33, 53]
    # NOTE(review): the next statement is dead -- x is immediately reassigned
    # below, so the complex draw only advances the (unseeded) RNG stream.
    x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
    x = (np.random.randn(*shape) > 0).astype(dtype)
    truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
    truth = np.vstack(truth).T  # Convert to [num_true, indices].
    self._testWhere(x, truth, expected_err_re)
  def testRandomBool(self):
    self._testRandom(np.bool)
  def testRandomInt32(self):
    self._testRandom(np.int32)
  def testRandomInt64(self):
    self._testRandom(np.int64)
  def testRandomFloat(self):
    self._testRandom(np.float32)
  def testRandomDouble(self):
    self._testRandom(np.float64)
  def testRandomComplex64(self):
    self._testRandom(np.complex64)
  def testRandomComplex128(self):
    self._testRandom(np.complex128)
  def testRandomUint8(self):
    self._testRandom(np.uint8)
  def testRandomInt8(self):
    self._testRandom(np.int8)
  def testRandomInt16(self):
    self._testRandom(np.int16)
  def testThreeArgument(self):
    # where(cond, x, y) element-wise selection form.
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.test_session(use_gpu=True):
      tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
  """Throughput benchmark for tf.where over varying sizes and densities."""
  def benchmarkWhere(self):
    # m x n input, p = probability an element is true, on CPU and GPU.
    for (m, n, p, use_gpu) in itertools.product(
        [10],
        [10, 100, 1000, 10000, 100000, 1000000],
        [0.01, 0.5, 0.99],
        [False, True]):
      name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
          v = resource_variable_ops.ResourceVariable(x)
          op = array_ops.where(v)
        with session.Session() as sess:
          v.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          gb_processed_input = m * n / 1.0e9
          # approximate size of output: m*n*p int64s for each axis.
          gb_processed_output = 2 * 8 * m * n * p / 1.0e9
          gb_processed = gb_processed_input + gb_processed_output
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
geodynamics/snac | StGermain/Base/IO/Python/Dictionary.py | 6 | 2716 | #!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
## Copyright (C), 2003, Victorian Partnership for Advanced Computing (VPAC) Ltd, 110 Victoria Street, Melbourne, 3053, Australia.
##
## Authors:
## Stevan M. Quenette, Senior Software Engineer, VPAC. (steve@vpac.org)
## Patrick D. Sunter, Software Engineer, VPAC. (pds@vpac.org)
## Luke J. Hodkinson, Computational Engineer, VPAC. (lhodkins@vpac.org)
## Siew-Ching Tan, Software Engineer, VPAC. (siew@vpac.org)
## Alan H. Lo, Computational Engineer, VPAC. (alan@vpac.org)
## Raquibul Hassan, Computational Engineer, VPAC. (raq@vpac.org)
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
## $Id: Dictionary.py 3462 2006-02-19 06:53:24Z WalterLandry $
##
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import StGermain.Base.Bindings.Dictionary as bindings
def copyright():
	"""Return the copyright notice string for this module."""
	notice = ("StGermain.Base.IO.Dictionary Python module: Copyright (c) 2003 "
	          "Victorian Partnership for Advanced Computing (VPAC) Ltd. Australia.")
	return notice
def new():
	"""Create a new, empty dictionary handle via the C bindings."""
	return bindings.New()
class Dictionary:
	"""Thin Python wrapper around a StGermain Dictionary C handle."""
	def Print( self ):
		"""Print the dictionary via the C bindings."""
		return bindings.Print( self._handle )
	def add( self, name, value ):
		"""Add a named entry, dispatching on the value's type."""
		# If the value has a handle, add it as a struct/dictionary
		try:
			return bindings.AddStruct( self._handle, name, value._handle )
		except AttributeError:
			# Bug fix: the original compared value.__class__ to the string
			# "<type 'list'>", which is never true, so lists silently fell
			# through to AddString; the dead branch also called an undefined
			# module-level addToList instead of the method on self.
			if isinstance( value, list ):
				bindings.AddList( self._handle, name, value )
				self.addToList( name, value )
			else:
				return bindings.AddString( self._handle, name, value )
	def addToList( self, listName, value ):
		#TODO addToList( self, listName, value )
		return None
	def LoadFromFile( self, filename ):
		"""Populate the dictionary from a file on disk via the C bindings."""
		return bindings.LoadFromFile( self._handle, filename )
	def __init__( self, handle ):
		# Wrap an existing handle, e.g. one produced by new().
		self._handle = handle
		return
# version
__id__ = "$Id: Dictionary.py 3462 2006-02-19 06:53:24Z WalterLandry $"
| gpl-2.0 |
gsnbng/erpnext | erpnext/stock/report/item_price_stock/item_price_stock.py | 5 | 3820 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
	"""Report entry point: return (columns, data) for Item Price Stock.

	:param filters: dict of report filters (may contain ``item_code``)
	"""
	# The original pre-initialized columns/data to [] and immediately
	# overwrote them; those dead assignments are removed.
	columns = get_columns()
	data = get_data(filters, columns)
	return columns, data
def get_columns():
	"""Return the column definitions for the Item Price Stock report."""
	def make_column(label, fieldname, fieldtype, width, options=None):
		# Build one column definition; Link columns carry an options doctype.
		column = {
			"label": _(label),
			"fieldname": fieldname,
			"fieldtype": fieldtype,
			"width": width
		}
		if options is not None:
			column["options"] = options
		return column
	return [
		make_column("Item Code", "item_code", "Link", 120, "Item"),
		make_column("Item Name", "item_name", "Data", 120),
		make_column("Brand", "brand", "Data", 100),
		make_column("Warehouse", "warehouse", "Link", 120, "Warehouse"),
		make_column("Stock Available", "stock_available", "Float", 120),
		make_column("Buying Price List", "buying_price_list", "Link", 120, "Price List"),
		make_column("Buying Rate", "buying_rate", "Currency", 120),
		make_column("Selling Price List", "selling_price_list", "Link", 120, "Price List"),
		make_column("Selling Rate", "selling_rate", "Currency", 120)
	]
def get_data(filters, columns):
	"""Assemble the report rows.

	:param filters: report filters, passed through to the query
	:param columns: column definitions (unused here; kept for the standard
		script-report API shape)
	"""
	# The original initialized a list and immediately overwrote it; the
	# dead assignment is removed.
	return get_item_price_qty_data(filters)
def get_item_price_qty_data(filters):
	"""Fetch item prices joined with bin stock, then merge in the
	buying/selling price-list details for each row.

	:param filters: dict; an optional ``item_code`` narrows the query
	:return: list of row dicts matching the fields from get_columns()
	"""
	conditions = ""
	if filters.get("item_code"):
		# %(item_code)s is a parameterized placeholder resolved by frappe,
		# so the filter value is not interpolated into the SQL text.
		conditions += "where a.item_code=%(item_code)s"
	item_results = frappe.db.sql("""select a.item_code, a.item_name, a.name as price_list_name,
		a.brand as brand, b.warehouse as warehouse, b.actual_qty as actual_qty
		from `tabItem Price` a left join `tabBin` b
		ON a.item_code = b.item_code
		{conditions}"""
		.format(conditions=conditions), filters, as_dict=1)
	price_list_names = list(set([item.price_list_name for item in item_results]))
	buying_price_map = get_price_map(price_list_names, buying=1)
	selling_price_map = get_price_map(price_list_names, selling=1)
	result = []
	if item_results:
		for item_dict in item_results:
			data = {
				'item_code': item_dict.item_code,
				'item_name': item_dict.item_name,
				'brand': item_dict.brand,
				'warehouse': item_dict.warehouse,
				'stock_available': item_dict.actual_qty or 0,
				'buying_price_list': "",
				'buying_rate': 0.0,
				'selling_price_list': "",
				'selling_rate': 0.0
			}
			price_list = item_dict["price_list_name"]
			if buying_price_map.get(price_list):
				data["buying_price_list"] = buying_price_map.get(price_list)["Buying Price List"] or ""
				data["buying_rate"] = buying_price_map.get(price_list)["Buying Rate"] or 0
			if selling_price_map.get(price_list):
				data["selling_price_list"] = selling_price_map.get(price_list)["Selling Price List"] or ""
				data["selling_rate"] = selling_price_map.get(price_list)["Selling Rate"] or 0
			result.append(data)
	return result
def get_price_map(price_list_names, buying=0, selling=0):
	"""Map Item Price document name -> labelled price-list and rate.

	The labels in the returned inner dicts ("Buying ..."/"Selling ...")
	match the direction selected by the ``buying``/``selling`` flags.
	"""
	price_map = {}
	if not price_list_names:
		return price_map

	if buying:
		price_list_key, rate_key = "Buying Price List", "Buying Rate"
	else:
		price_list_key, rate_key = "Selling Price List", "Selling Rate"

	filters = {"name": ("in", price_list_names)}
	# Restrict to prices of the requested direction.
	filters["buying" if buying else "selling"] = 1

	pricing_details = frappe.get_all("Item Price",
		fields=["name", "price_list", "price_list_rate"], filters=filters)

	for row in pricing_details:
		price_map[row["name"]] = {
			price_list_key: row["price_list"],
			rate_key: row["price_list_rate"],
		}
	return price_map
| agpl-3.0 |
afonsoduarte/gardensquareproject-site | templates/node_modules/node-sass/node_modules/pangyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')

# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
  """re.sub callback: keep the text before the '#', then pad with '#'
  characters so the masked line keeps its original length."""
  prefix = matchobj.group(1)
  marker = matchobj.group(2)
  comment_body = matchobj.group(3)
  return prefix + marker + '#' * len(comment_body)
def mask_comments(input):
  """Mask comment text with '#' padding so brace counting skips braces
  that appear inside comments."""
  search_re = re.compile(r'(.*?)(#)(.*)')
  masked = []
  for line in input:
    masked.append(search_re.sub(comment_replace, line))
  return masked
def quote_replace(matchobj):
  """re.sub callback: replace a quoted string's contents with 'x' padding
  of equal length, keeping the surrounding quote characters."""
  prefix = matchobj.group(1)
  quote_char = matchobj.group(2)
  quoted_body = matchobj.group(3)
  return prefix + quote_char + 'x' * len(quoted_body) + quote_char
def mask_quotes(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  pattern = re.compile(r'(.*?)' + QUOTE_RE_STR)
  masked = []
  for line in input:
    masked.append(pattern.sub(quote_replace, line))
  return masked
def do_split(input, masked_input, search_re):
  """Split lines wherever search_re matches the masked copy.

  The literal two-character marker r'\n' is inserted at the split point in
  both the real and the masked line, then each line is split on that marker.
  Returns (output_lines, masked_output_lines).
  """
  output = []
  mask_output = []
  for line, masked_line in zip(input, masked_input):
    match = search_re.match(masked_line)
    while match:
      # Matching is done on the masked line so braces inside strings or
      # comments never trigger a split, but both copies are cut in step.
      cut = len(match.group(1))
      line = line[:cut] + r'\n' + line[cut:]
      masked_line = masked_line[:cut] + r'\n' + masked_line[cut:]
      match = search_re.match(masked_line)
    output.extend(line.split(r'\n'))
    mask_output.extend(masked_line.split(r'\n'))
  return (output, mask_output)
def split_double_braces(input):
  """Split lines that carry multiple braces so later indentation is pretty.

  Quotes and comments are masked first so that braces inside them are not
  treated as structure.  Lines matching either double-brace pattern are
  split, which lets closing braces form a clean diagonal when indented.
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  masked_input = mask_comments(mask_quotes(input))

  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)
  return output
def count_braces(line):
  """Return (delta, after) for one line.

  delta is opens minus closes on the line (comments and quoted strings are
  stripped first).  after is True when the indentation change should be
  applied after printing the line rather than before it.
  """
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  stripped = COMMENT_RE.sub(r'', line)
  stripped = QUOTE_RE.sub(r"''", stripped)

  delta = 0
  for ch in stripped:
    if ch in '[({':
      delta += 1
    elif ch in '])}':
      delta -= 1

  # A net-opening line is printed at the current level; indent afterwards.
  after = delta > 0
  # Special case: a closing brace with something other than whitespace
  # ahead of it should stay with the previous indentation level, so the
  # unindent also happens only after this line is printed.
  if delta < 0 and closing_prefix_re.match(stripped):
    after = True
  return (delta, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
  """Read a GYP file (named argument or stdin) and pretty-print it.

  Returns 0 so the caller can pass the result to sys.exit().
  """
  if len(sys.argv) > 1:
    # Fix: close the input file deterministically instead of leaking the
    # handle returned by open().  try/finally keeps Python 2.4 compatibility.
    f = open(sys.argv[1])
    try:
      data = f.read().splitlines()
    finally:
      f.close()
  else:
    data = sys.stdin.read().splitlines()
  # Split up the double braces.
  lines = split_double_braces(data)
  # Indent and print the output.
  prettyprint_input(lines)
  return 0
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
  sys.exit(main())
| mit |
zouyapeng/horizon_change | openstack_dashboard/dashboards/admin/volumes/volume_types/qos_specs/tests.py | 7 | 7771 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class QosSpecsTests(test.BaseAdminViewTests):
    """Admin-panel tests for the Cinder QoS spec views.

    Every Cinder API call is stubbed with mox: each test records the
    expected calls, calls ReplayAll(), then drives the view through the
    Django test client and checks the response.  The recorded call order
    matters, so the stub setup must mirror the view's call sequence.
    """

    @test.create_stubs({api.cinder: ('qos_spec_get',), })
    def test_manage_qos_spec(self):
        # The spec index page should render one table row per key/value
        # pair of the QoS spec.
        qos_spec = self.cinder_qos_specs.first()
        index_url = reverse(
            'horizon:admin:volumes:volume_types:qos_specs:index',
            args=[qos_spec.id])
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id)\
            .AndReturn(qos_spec)
        self.mox.ReplayAll()
        res = self.client.get(index_url)
        self.assertTemplateUsed(res,
                                'admin/volumes/volume_types/qos_specs/index.html')
        rows = res.context['table'].get_rows()
        specs = self.cinder_qos_specs.first().specs
        for row in rows:
            key = row.cells['key'].data
            self.assertTrue(key in specs)
            self.assertEqual(row.cells['value'].data,
                             specs.get(key))

    @test.create_stubs({api.cinder: ('qos_spec_create',)})
    def test_create_qos_spec(self):
        # Posting the create form should call qos_spec_create and redirect
        # back to the volume types tab with one success message.
        formData = {'name': 'qos-spec-1',
                    'consumer': 'back-end'}
        api.cinder.qos_spec_create(IsA(http.HttpRequest),
                                   formData['name'],
                                   {'consumer': formData['consumer']}).\
            AndReturn(self.cinder_qos_specs.first())
        self.mox.ReplayAll()
        res = self.client.post(
            reverse('horizon:admin:volumes:volume_types:create_qos_spec'),
            formData)
        redirect = reverse('horizon:admin:volumes:volume_types_tab')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, redirect)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.cinder: ('volume_type_list_with_qos_associations',
                                     'qos_spec_list',
                                     'qos_spec_delete',)})
    def test_delete_qos_spec(self):
        # Triggering the table's delete action should call qos_spec_delete
        # with the spec id (as a string) and redirect back to the tab.
        qos_spec = self.cinder_qos_specs.first()
        formData = {'action': 'qos_specs__delete__%s' % qos_spec.id}
        api.cinder.volume_type_list_with_qos_associations(
            IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        api.cinder.qos_spec_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_qos_specs.list())
        api.cinder.qos_spec_delete(IsA(http.HttpRequest),
                                   str(qos_spec.id))
        self.mox.ReplayAll()
        res = self.client.post(
            reverse('horizon:admin:volumes:volume_types_tab'),
            formData)
        redirect = reverse('horizon:admin:volumes:volume_types_tab')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, redirect)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.cinder: ('qos_spec_get',
                                     'qos_spec_get_keys',
                                     'qos_spec_set_keys',), })
    def test_spec_edit(self):
        # Editing a single key should re-submit the whole spec dict with
        # the updated value and redirect to the spec's index page.
        qos_spec = self.cinder_qos_specs.first()
        key = 'minIOPS'
        edit_url = reverse('horizon:admin:volumes:volume_types:qos_specs:edit',
                           args=[qos_spec.id, key])
        index_url = reverse(
            'horizon:admin:volumes:volume_types:qos_specs:index',
            args=[qos_spec.id])
        data = {'value': '9999'}
        # Pre-apply the expected change so qos_spec_set_keys is recorded
        # with the dict the view is expected to send.
        qos_spec.specs[key] = data['value']
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id)\
            .AndReturn(qos_spec)
        api.cinder.qos_spec_get_keys(IsA(http.HttpRequest),
                                     qos_spec.id, raw=True)\
            .AndReturn(qos_spec)
        api.cinder.qos_spec_set_keys(IsA(http.HttpRequest),
                                     qos_spec.id,
                                     qos_spec.specs)
        self.mox.ReplayAll()
        resp = self.client.post(edit_url, data)
        self.assertNoFormErrors(resp)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(resp, index_url)

    @test.create_stubs({api.cinder: ('qos_spec_get',
                                     'qos_spec_set_keys',), })
    def test_edit_consumer(self):
        # Changing the consumer should be stored through qos_spec_set_keys.
        qos_spec = self.cinder_qos_specs.first()
        # modify consumer to 'front-end'
        formData = {'consumer_choice': 'front-end'}
        edit_url = reverse(
            'horizon:admin:volumes:volume_types:edit_qos_spec_consumer',
            args=[qos_spec.id])
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id).AndReturn(qos_spec)
        api.cinder.qos_spec_set_keys(IsA(http.HttpRequest),
                                     qos_spec.id,
                                     {'consumer': formData['consumer_choice']})
        self.mox.ReplayAll()
        resp = self.client.post(edit_url, formData)
        redirect = reverse('horizon:admin:volumes:volume_types_tab')
        self.assertNoFormErrors(resp)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(resp, redirect)

    @test.create_stubs({api.cinder: ('qos_spec_list',
                                     'qos_spec_get',
                                     'qos_spec_get_associations',
                                     'volume_type_get',
                                     'qos_spec_associate',), })
    def test_associate_qos_spec(self):
        # Associating a spec with a volume type: the form loads all specs
        # and their current associations, then submits one association.
        volume_type = self.volume_types.first()
        volume_types = self.volume_types.list()
        qos_spec = self.cinder_qos_specs.first()
        qos_specs = self.cinder_qos_specs.list()
        # associate qos spec with volume type
        formData = {'qos_spec_choice': qos_spec.id}
        edit_url = reverse(
            'horizon:admin:volumes:volume_types:manage_qos_spec_association',
            args=[volume_type.id])
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id).AndReturn(qos_spec)
        api.cinder.qos_spec_list(IsA(http.HttpRequest)) \
            .AndReturn(qos_specs)
        api.cinder.qos_spec_get_associations(IsA(http.HttpRequest),
                                             qos_spec.id) \
            .AndReturn(volume_types)
        api.cinder.qos_spec_get_associations(IsA(http.HttpRequest),
                                             qos_specs[1].id) \
            .AndReturn(volume_types)
        api.cinder.volume_type_get(IsA(http.HttpRequest),
                                   str(volume_type.id)) \
            .AndReturn(volume_type)
        api.cinder.qos_spec_associate(IsA(http.HttpRequest),
                                      qos_spec,
                                      str(volume_type.id))
        self.mox.ReplayAll()
        resp = self.client.post(edit_url, formData)
        redirect = reverse('horizon:admin:volumes:volume_types_tab')
        self.assertNoFormErrors(resp)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(resp, redirect)
| apache-2.0 |
jkonecny12/anaconda | pyanaconda/modules/payloads/payload/factory.py | 3 | 2782 | #
# Factory class to create payloads.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.payloads.constants import PayloadType
__all__ = ["PayloadFactory"]
class PayloadFactory(object):
    """Factory to create payloads."""

    @staticmethod
    def create_payload(payload_type: PayloadType):
        """Create a partitioning module.

        :param payload_type: a payload type
        :return: a payload module
        :raise ValueError: for an unknown payload type
        """
        # Payload modules are imported lazily so that only the selected
        # backend's dependencies are loaded.
        if payload_type == PayloadType.LIVE_IMAGE:
            from pyanaconda.modules.payloads.payload.live_image.live_image import \
                LiveImageModule
            module = LiveImageModule()
        elif payload_type == PayloadType.LIVE_OS:
            from pyanaconda.modules.payloads.payload.live_os.live_os import LiveOSModule
            module = LiveOSModule()
        elif payload_type == PayloadType.DNF:
            from pyanaconda.modules.payloads.payload.dnf.dnf import DNFModule
            module = DNFModule()
        elif payload_type == PayloadType.RPM_OSTREE:
            from pyanaconda.modules.payloads.payload.rpm_ostree.rpm_ostree import \
                RPMOSTreeModule
            module = RPMOSTreeModule()
        else:
            raise ValueError("Unknown payload type: {}".format(payload_type))

        return module

    @classmethod
    def get_type_for_kickstart(cls, data):
        """Get a payload type for the given kickstart data.

        :param data: a kickstart data
        :return: a payload type or None when no payload command was seen
        """
        if data.ostreesetup.seen:
            return PayloadType.RPM_OSTREE

        if data.liveimg.seen:
            return PayloadType.LIVE_IMAGE

        # Any of these kickstart commands/sections implies the DNF payload.
        dnf_commands = (data.cdrom, data.harddrive, data.hmc, data.module,
                        data.nfs, data.url, data.packages)
        if any(command.seen for command in dnf_commands):
            return PayloadType.DNF

        return None
| gpl-2.0 |
vsimon/kubernetes | hack/verify-flags-underscore.py | 132 | 4683 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
# Command-line interface: an optional list of files to check.  When no
# files are given, every non-generated file in the repository is scanned.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return true if the given filename is binary.

    A file is treated as binary when it contains a NUL byte, or when it
    cannot be opened/read at all (missing file, undecodable content).

    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    try:
        with open(pathname, 'r') as f:
            CHUNKSIZE = 1024
            while 1:
                chunk = f.read(CHUNKSIZE)
                if '\0' in chunk: # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break # done
    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only ordinary errors should mean "binary".
    except Exception:
        return True
    return False
def get_all_files(rootdir):
    """Walk rootdir and return the paths of every text file, skipping
    vendored/generated directories and BUILD files."""
    SKIPPED_DIRS = ('vendor', 'staging', '_output', '_gopath',
                    'third_party', '.git', '.make')
    all_files = []
    for root, dirs, files in os.walk(rootdir):
        # Prune in place so os.walk never descends into these directories.
        for skipped in SKIPPED_DIRS:
            if skipped in dirs:
                dirs.remove(skipped)
        if 'BUILD' in files:
            files.remove('BUILD')
        for name in files:
            pathname = os.path.join(root, name)
            if not is_binary(pathname):
                all_files.append(pathname)
    return all_files
# Collects all the flags used in golang files and verifies the flags do
# not contain underscore. If any flag needs to be excluded from this check,
# need to add that flag in hack/verify-flags/excluded-flags.txt.
def check_underscore_in_flags(rootdir, files):
    """Verify that no Go flag declaration uses an underscore in its name.

    Flags listed in hack/verify-flags/excluded-flags.txt are allowed.
    When an offending flag is found, prints the list and exits with
    status 1; otherwise returns None.
    """
    # preload the 'known' flags which don't follow the - standard
    # Fix: use `with` so the files are closed even if reading raises.
    pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
    with open(pathname, 'r') as f:
        excluded_flags = set(f.read().splitlines())

    # Patterns matching the flag-name argument of the pflag/flag helpers.
    regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
               re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]

    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        with open(pathname, 'r') as f:
            data = f.read()
        matches = []
        for regex in regexs:
            matches = matches + regex.findall(data)
        for flag in matches:
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)

    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        offenders = sorted(new_excluded_flags)
        print("%s" % "\n".join(offenders))
        sys.exit(1)
def main():
    """Entry point: check the files given on the command line, or the
    whole repository when none were given."""
    rootdir = os.path.dirname(__file__) + "/../"
    rootdir = os.path.abspath(rootdir)
    # An empty filename list means "scan everything".
    files = args.filenames or get_all_files(rootdir)
    check_underscore_in_flags(rootdir, files)
# Exit with main()'s status so CI can detect failures.
if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 |
edwiincao/cpp_features | coroutine/unit_test/gtest_unit/gtest/test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Flags understood by the Google Test binaries under test.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''

# Expected XML for the full test binary.  '*' acts as a wildcard that the
# comparison helper in gtest_xml_test_utils matches against varying values
# (times, timestamps, type parameters).
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
  <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
    <testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
  </testsuite>
  <testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
    <testcase name="Fails" status="run" time="*" classname="FailedTest">
      <failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
    </testcase>
  </testsuite>
  <testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
    <testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
    <testcase name="Fails" status="run" time="*" classname="MixedResultTest">
      <failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
      <failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
    </testcase>
    <testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
  </testsuite>
  <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
    <testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
      <failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
    </testcase>
  </testsuite>
  <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
    <testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
      <failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
    </testcase>
  </testsuite>
  <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
    <testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
  </testsuite>
  <testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
    <testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
    <testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
    <testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
    <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
  </testsuite>
  <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
     <testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
     <testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
     <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
  </testsuite>
  <testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
    <testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
    <testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
    <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
    <testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
  </testsuite>
  <testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
    <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
  </testsuite>
  <testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
    <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
  </testsuite>
  <testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
    <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
  </testsuite>
  <testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
    <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
  </testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}

# Expected XML when only SuccessfulTest.* is selected via --gtest_filter.
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
            timestamp="*" name="AllTests" ad_hoc_property="42">
  <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
             errors="0" time="*">
    <testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
  </testsuite>
</testsuites>"""

# Expected XML for a binary that defines no tests at all.
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
            timestamp="*" name="AllTests">
</testsuites>"""

GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# NOTE: this runs the test binary at import time to detect typed-test
# support from its --gtest_list_tests output.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
re.match,
'XML datettime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is avalable only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
# Run the suite with a minimal stack trace depth so failure messages in
# the generated XML stay comparable to the templates above.
if __name__ == '__main__':
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| lgpl-3.0 |
myriadmobile/bitbackup | app/main.py | 1 | 8338 | #!/usr/bin/env python
import argparse
import signal
import subprocess
import sys
import threading
from datetime import datetime
import boto3
import colorama
import requests
import tarfile
import tempfile
import workerpool
from boto3.s3.transfer import S3Transfer
from colorama import Fore, Style
is_debug = False
def debug(message):
    """Print *message* in dim debug styling, only when is_debug is set."""
    if not is_debug:
        return
    thread_tag = Fore.MAGENTA + threading.current_thread().name + ': '
    print(thread_tag + Style.DIM + Fore.CYAN + message + Style.RESET_ALL)
def info(message):
    """Print an informational *message* prefixed with the thread name."""
    thread_tag = Fore.MAGENTA + threading.current_thread().name + ': '
    print(thread_tag + Fore.CYAN + message + Style.RESET_ALL)
def success(message):
    """Print *message* in green to signal a completed operation."""
    thread_tag = Fore.MAGENTA + threading.current_thread().name + ': '
    print(thread_tag + Fore.GREEN + message + Style.RESET_ALL)
def error(message):
    """Print *message* in bright red to signal a failure."""
    thread_tag = Fore.MAGENTA + threading.current_thread().name + ': '
    print(thread_tag + Style.BRIGHT + Fore.RED + message + Style.RESET_ALL)
def divider():
    """Print a bright magenta horizontal rule (109 characters wide)."""
    rule = '=' * 109
    print(Style.BRIGHT + Fore.MAGENTA + rule + Fore.RESET + Style.RESET_ALL)
class Bitbackup:
    """Backs up every Bitbucket repository of a user/team to S3.

    A pool of worker threads is started; each worker is equipped with its
    own toolbox (shared Bitbucket/git clients plus a per-worker S3 client)
    and processes one repository per job.
    """

    def __init__(self, bb_username='', bb_password='', s3_key='', s3_secret='', s3_bucket='', s3_base_path='',
                 s3_endpoint='https://s3.amazonaws.com', worker_count=8):
        # Bitbucket credentials.
        self._bb_username = bb_username
        self._bb_password = bb_password
        # S3 destination configuration.
        self._s3_key = s3_key
        self._s3_secret = s3_secret
        self._s3_bucket = s3_bucket
        self._s3_base_path = s3_base_path
        self._s3_endpoint = s3_endpoint
        # Number of concurrent backup workers.
        self._worker_count = worker_count

    def run(self):
        """List all repositories and fan backup jobs out to a worker pool."""
        self._print_header()
        signaler = Signaler()
        bitbucket = Bitbucket(self._bb_username, self._bb_password)
        git = Git()

        def toolbox_factory():
            # Called once per worker, so each worker gets its own S3 client.
            s3 = S3(self._s3_key, self._s3_secret, self._s3_bucket, self._s3_base_path, self._s3_endpoint)
            return BitbackupWorkerToolbox(bitbucket, git, s3)

        def worker_factory(job_queue):
            worker = workerpool.EquippedWorker(job_queue, toolbox_factory)
            worker.setName(worker.getName().replace("Thread", "Worker"))
            return worker

        info('Loading repository list...')
        repos = bitbucket.get_all_repositories()
        info('Starting {} workers...'.format(self._worker_count))
        pool = workerpool.WorkerPool(size=self._worker_count, worker_factory=worker_factory, maxjobs=1)
        for repo in repos:
            # Stop queueing new jobs once a termination signal was received.
            if signaler.should_term():
                break
            pool.put(BitbackupJob(repo))
        pool.shutdown()
        pool.wait()
        self._print_footer()

    def _print_header(self):
        """Print the startup banner showing the account and S3 destination."""
        print('')
        divider()
        divider()
        print('')
        print(Style.BRIGHT + Fore.GREEN + ' Starting Bitbackup!' + Style.RESET_ALL)
        print('')
        print(Style.BRIGHT + ' User/Team: ' + Style.RESET_ALL + self._bb_username)
        print(
            Style.BRIGHT + ' Destination: ' + Style.RESET_ALL + 's3://' + self._s3_bucket + '/' + self._s3_base_path)
        print('')
        divider()
        divider()
        print('')

    def _print_footer(self):
        """Print the closing banner."""
        print('')
        divider()
        divider()
        print('')
        print(Style.BRIGHT + Fore.GREEN + 'Bitbackup finished!' + Style.RESET_ALL)
        print('')
        divider()
        divider()
        print('')
class BitbackupWorkerToolbox:
    """Per-worker bundle of the clients a BitbackupJob needs.

    One toolbox is constructed per worker thread; BitbackupJob.run()
    reads the three attributes directly.
    """

    def __init__(self, bitbucket, git, s3):
        self.bitbucket = bitbucket  # Bitbucket API client (shared)
        self.git = git              # Git clone/archive helper (shared)
        self.s3 = s3                # per-thread S3 uploader
class BitbackupJob(workerpool.Job):
    """Work item for one repository: resolve clone URL, clone+archive, upload."""

    def __init__(self, repo):
        super().__init__()
        # `repo` is a repository dict as returned by the Bitbucket 2.0 API.
        self._repo = repo

    def run(self, toolbox=None):
        """Execute the backup using the worker's toolbox; never raises,
        so one failing repository does not kill the whole run."""
        repo_name = self._repo.get('full_name')
        try:
            clone_url = toolbox.bitbucket.get_clone_url(self._repo)
            archive = toolbox.git.archive(clone_url)
            key = repo_name + '.tar.gz'
            toolbox.s3.upload(key, archive.name)
            success('Backed up ' + repo_name + '!')
        except Exception as exc:
            # Bug fix: the exception used to be swallowed silently, making
            # failures undiagnosable. Include the cause in the error line.
            error('Failed to backup {}: {}'.format(repo_name, exc))
class Bitbucket:
    """Minimal read-only client for the Bitbucket 2.0 REST API."""

    def __init__(self, username, password, endpoint='https://api.bitbucket.org/2.0/'):
        self._username = username
        self._password = password
        self._endpoint = endpoint  # always ends with '/'

    def _create_url(self, path):
        # endpoint already carries the trailing slash, so plain concat works.
        return self._endpoint + path

    def _request(self, url='', **kwargs):
        """GET `url` with basic auth and return the decoded JSON body."""
        kwargs['auth'] = (self._username, self._password)
        response = requests.request('get', url, **kwargs)
        return response.json()

    def get_all_repositories(self):
        """Return every repository dict, following API pagination."""
        repositories = []
        # Renamed from `next`, which shadowed the builtin.
        next_url = self._create_url('repositories/' + self._username + '?pagelen=100&page=1')
        while next_url is not None:
            debug('Fetching repo list' + next_url)
            response = self._request(next_url)
            # Bug fix: an error payload has no 'values' key; the old code
            # crashed with `TypeError: list += None` in that case.
            repositories += response.get('values') or []
            next_url = response.get('next')
        return repositories

    def get_clone_url(self, repository):
        """Return the HTTPS clone URL with the password embedded, or None."""
        clone = repository.get('links').get('clone')
        for link in clone:
            if link.get('name') == 'https':
                href = str(link.get('href'))
                # 'https://user@host/...' -> 'https://user:password@host/...'
                return href.replace('@', ':' + self._password + '@')
        return None
class Git:
    """Clones repositories and packs them into gzipped tarballs."""

    def archive(self, url):
        """Bare-clone `url` into a temp dir and return an open
        NamedTemporaryFile containing its .tar.gz archive."""
        basename = url[str(url).rindex('/') + 1:]
        tempdir = tempfile.TemporaryDirectory(suffix='.git')
        debug('Cloning ' + basename + ' in to ' + tempdir.name + '...')
        # Security fix: pass argv as a list (shell=False) so the URL — which
        # comes from the remote API — cannot inject shell metacharacters.
        subprocess.check_output(
            ['git', 'clone', '--bare', '--quiet', url, tempdir.name])
        archive = self._make_tarfile(tempdir.name, basename)
        # `tempdir` is removed when this object is garbage collected, which
        # is safe because the tarball has already been written.
        return archive

    def _make_tarfile(self, source_dir, basename):
        """Tar+gzip `source_dir` (stored under `basename`) into a temp file.

        The archive is deleted from disk when the returned file object is
        closed, so the caller must keep it alive until the upload finishes.
        """
        output = tempfile.NamedTemporaryFile(suffix='.tar.gz')
        debug('Tar and GZ ' + source_dir + ' to ' + output.name + ' as ' + basename + '...')
        with tarfile.open(output.name, "w:gz") as tar:
            tar.add(source_dir, arcname=basename)
        return output
class S3:
    """Thin wrapper around boto3's managed-transfer upload API."""

    def __init__(self, access_key, secret_key, bucket, s3_base_path, s3_endpoint='https://s3.amazonaws.com'):
        self._bucket = bucket
        self._base_path = s3_base_path
        s3_client = boto3.client(aws_access_key_id=access_key,
                                 aws_secret_access_key=secret_key,
                                 service_name='s3',
                                 endpoint_url=s3_endpoint)
        self._transfer = S3Transfer(s3_client)

    def upload(self, key, file):
        """Upload local `file` to <base_path>/<key> in the configured bucket."""
        key = self._base_path + '/' + key
        debug('Uploading ' + file + ' to s3://' + self._bucket + ':' + key + '...')
        self._transfer.upload_file(file, self._bucket, key)
class Signaler:
    """Records POSIX termination signals so the main loop can stop gracefully."""

    # Class-level default; term() shadows it with an instance attribute.
    sigterm = False

    def __init__(self):
        # Catch Ctrl-C, kill and Ctrl-\ alike.
        for signum in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
            signal.signal(signum, self.term)

    def term(self, signum, frame):
        """Signal-handler callback: remember that termination was requested."""
        self.sigterm = True

    def should_term(self):
        """True once any registered termination signal has been received."""
        return self.sigterm
if __name__ == '__main__':
    colorama.init()
    parser = argparse.ArgumentParser(description='Backup Bitbucket repositories to a S3 compatible store.')
    parser.add_argument('--bb-username', required=True, dest='bb_username', help='Bitbucket username or team name')
    parser.add_argument('--bb-password', required=True, dest='bb_password', help='Bitbucket password or team API key')
    parser.add_argument('--s3-key', required=True, dest='s3_key', help='S3 Access Key')
    parser.add_argument('--s3-secret', required=True, dest='s3_secret', help='S3 Secret Key')
    parser.add_argument('--s3-bucket', required=True, dest='s3_bucket', help='S3 Bucket')
    parser.add_argument('--s3-base-path', dest='s3_base_path', default=datetime.now().strftime("%Y-%m-%d-%H:%M"),
                        help='S3 base path')
    parser.add_argument('--s3-endpoint', dest='s3_endpoint', default='https://s3.amazonaws.com', help='S3 host')
    parser.add_argument('--workers', dest='worker_count', type=int, default=8, help='The number of worker threads')
    parser.add_argument('--debug', dest='is_debug', action='store_true', help='Print debug messages')
    # All remaining argparse dest names line up with Bitbackup.__init__ kwargs.
    args = vars(parser.parse_args())
    is_debug = args.pop('is_debug')
    # Bug fix: the backup used to be executed once here AND a second time
    # inside the try block, duplicating every clone and upload. Run it
    # exactly once, with the error handling.
    try:
        bitbackup = Bitbackup(**args)
        bitbackup.run()
    except Exception as e:
        error('Backup failed: {}'.format(e))
        sys.exit(1)
| mit |
shadowkun/Ogre4Rviz | Tools/Wings3DExporter/xmlout.py | 34 | 1531 |
# extremely simple XML writer
#
# This is to remove libxml2 dependency on platforms where it's
# difficult to build
#
# 2003 Attila Tajti <attis@spacehawks.hu>
class XMLDoc:
    """Minimal stand-in for a libxml2 document: holds an XML version string
    and a single root XMLNode, and can serialize itself to a file."""

    def __init__(self, version):
        self.version = version    # e.g. "1.0"; written into the XML declaration
        self.root_element = None  # set via XMLNode.docSetRootElement()

    def saveFile(self, filename):
        """Write the XML declaration followed by the whole node tree."""
        # Bug fix: the old code used the Python 2-only `file()` builtin and
        # never closed the handle; open() + a context manager works on both
        # Python 2 and 3 and guarantees the file is flushed and closed.
        with open(filename, "w") as f:
            f.write('<?xml version="' + self.version + '"?>\n')
            self.root_element._write(f, 0)

    def saveFormatFile(self, filename, fmt):
        # `fmt` is accepted only for libxml2 API compatibility; output is
        # always indented.
        self.saveFile(filename)

    def freeDoc(self):
        # Nothing to release; present for libxml2 API compatibility.
        pass
class XMLNode:
    """Minimal element node: a tag name, ordered attributes, children and
    optional text content."""

    def __init__(self, name):
        self.name = name
        self.props = []      # list of (name, value) pairs, in insertion order
        self.children = []
        self.content = None  # optional text content

    def docSetRootElement(self, doc):
        """Attach this node as the document's root."""
        doc.root_element = self

    def newChild(self, namespace, name, content):
        """Create a child element (optionally namespace-prefixed) and return it."""
        if namespace:
            fullname = namespace + ':' + name
        else:
            fullname = name
        child = XMLNode(fullname)
        child.content = content
        self.children.append(child)
        return child

    def setProp(self, name, value):
        """Append an attribute; duplicates are not checked."""
        self.props.append((name, value))

    def _write(self, f, indent):
        #istr = " " * indent
        istr = "\t" * indent
        # put together our tag
        tag = self.name
        for prop in self.props:
            name, value = prop
            tag += ' ' + name + '="' + value + '"'
        # print children between tags, text content, or a self-closing tag
        if self.children:
            f.write(istr + '<%s>\n' % tag)
            for child in self.children:
                child._write(f, indent + 1)
            f.write(istr + '</%s>\n' % self.name)
        elif self.content is not None:
            # Bug fix: `content` used to be stored by newChild() but silently
            # dropped on save. NOTE(review): neither content nor attribute
            # values are XML-escaped — callers must not pass <, > or ".
            f.write(istr + '<%s>%s</%s>\n' % (tag, self.content, self.name))
        else:
            f.write(istr + '<%s/>\n' % tag)
def newDoc(version):
    """Create an empty document (libxml2-style module-level factory)."""
    return XMLDoc(version)

def newNode(name):
    """Create a detached element node (libxml2-style module-level factory)."""
    return XMLNode(name)
| mit |
davidgbe/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
    """Reference O(N^3) Floyd-Warshall used to cross-check the fast versions.

    Mutates and returns `graph`. Zeros are treated as missing edges, and
    pairs that remain unreachable come back encoded as 0 again.
    """
    n_nodes = graph.shape[0]
    # Missing edges (zeros) become +inf, then self-distances are reset to 0.
    graph[graph == 0] = np.inf
    np.fill_diagonal(graph, 0)
    if not directed:
        # Symmetrize: keep the cheaper direction of every edge.
        graph = np.minimum(graph, graph.T)
    for mid in range(n_nodes):
        for src in range(n_nodes):
            for dst in range(n_nodes):
                through = graph[src, mid] + graph[mid, dst]
                if through < graph[src, dst]:
                    graph[src, dst] = through
    # Re-encode unreachable pairs as 0, matching graph_shortest_path's output.
    graph[np.isinf(graph)] = 0
    return graph
def generate_graph(N=20):
    """Build a deterministic (seed 0) sparse symmetric distance matrix."""
    rng = np.random.RandomState(0)
    dist_matrix = rng.random_sample((N, N))
    # Symmetrize: distances are not direction-dependent.
    dist_matrix = dist_matrix + dist_matrix.T
    # Knock out about half of the entries to make the graph sparse.
    # The two randint() calls must stay in this order to reproduce the
    # original random stream exactly.
    rows = rng.randint(N, size=N * N // 2)
    cols = rng.randint(N, size=N * N // 2)
    dist_matrix[rows, cols] = 0
    # Self-distances are always zero.
    np.fill_diagonal(dist_matrix, 0)
    return dist_matrix
def test_floyd_warshall():
    # Cross-check the compiled Floyd-Warshall ('FW') implementation against
    # the slow pure-Python reference, for both directed and undirected graphs.
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
        # .copy() because floyd_warshall_slow mutates its input in place.
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(graph_FW, graph_py)

def test_dijkstra():
    # Same cross-check for the Dijkstra ('D') implementation.
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_D = graph_shortest_path(dist_matrix, directed, 'D')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
    # Compare hop counts from single_source_shortest_path_length with the
    # dense reference; distances are binarized so cost == path length.
    dist_matrix = generate_graph(20)
    # We compare path length and not costs (-> set distances to 0 or 1)
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)

        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        for i in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in graph_py
            dist_dict = defaultdict(int)
            dist_dict.update(single_source_shortest_path_length(dist_matrix,
                                                                i))

            for j in range(graph_py[i].shape[0]):
                assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
    # Regression test: Dijkstra and Floyd-Warshall must agree on this small
    # fixed graph (a case that once triggered a bug in the 'D' method).
    X = np.array([[0., 0., 4.],
                  [1., 0., 2.],
                  [0., 5., 0.]])
    dist_FW = graph_shortest_path(X, directed=False, method='FW')
    dist_D = graph_shortest_path(X, directed=False, method='D')
    assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
embeddedarm/android_external_chromium_org | chrome/test/pyautolib/plugins_info.py | 69 | 3510 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python representation for Chromium Plugins info.
This is the info available at about:plugins.
Obtain one of these from PyUITestSuite::GetPluginsInfo() call.
Example:
class MyTest(pyauto.PyUITest):
def testBasic(self):
info = self.GetPluginsInfo() # fetch plugins snapshot
print info.Plugins()
See more examples in chrome/test/functional/plugins.py.
"""
import simplejson as json
from pyauto_errors import JSONInterfaceError
class PluginsInfo(object):
  """Represent info for Chromium plugins.

  The info is represented as a list of dictionaries, one for each plugin.
  """
  def __init__(self, plugins_dict):
    """Initialize a PluginsInfo from a json string.

    Args:
      plugins_dict: a dictionary returned by the automation command
                    'GetPluginsInfo'.

    Raises:
      pyauto_errors.JSONInterfaceError if the automation call returns an error.
    """
    # JSON string prepared in GetPluginsInfo() in automation_provider.cc
    self.pluginsdict = plugins_dict
    # `in` instead of dict.has_key(): has_key() was removed in Python 3 and
    # `in` behaves identically on Python 2.
    if 'error' in self.pluginsdict:
      raise JSONInterfaceError(self.pluginsdict['error'])

  def Plugins(self):
    """Get plugins.

    Returns:
      a list of plugin info dictionaries with keys such as 'name', 'path',
      'desc', 'enabled', 'version' and 'mimeTypes' (see about:plugins).
    """
    return self.pluginsdict.get('plugins', [])

  def PluginForPath(self, path):
    """Get plugin info for the given plugin path.

    Returns:
      a dictionary of info for the plugin, or None if not found.
    """
    # List comprehension instead of filter(): on Python 3 filter() returns a
    # lazy iterator, which made the emptiness check below always falsy-proof
    # wrong; a list preserves the Python 2 behavior on both versions.
    got = [plugin for plugin in self.Plugins() if plugin['path'] == path]
    if not got: return None
    return got[0]

  def PluginForName(self, name):
    """Get plugin info for the given name.

    There might be several plugins with the same name.

    Args:
      name: the name for which to look for.

    Returns:
      a list of info dictionaries for each plugin found with the given name.
    """
    return [plugin for plugin in self.Plugins() if plugin['name'] == name]

  def FirstPluginForName(self, name):
    """Get plugin info for the first plugin with the given name.

    This is useful in case there are multiple plugins for a name.

    Args:
      name: the name for which to look for.

    Returns:
      a plugin info dictionary
      None, if not found
    """
    # Renamed from `all`, which shadowed the builtin of the same name.
    matches = self.PluginForName(name)
    if not matches: return None
    return matches[0]
| bsd-3-clause |
chris-allan/pyshop | pyshop/helpers/download.py | 7 | 4458 | #-*- coding: utf-8 -*-
"""
Pyramid helper that mirror and serve file automatically from PyPI.
Package are mirrored safety by validating the PyPI certificate.
It is possible to use an HTTP Proxy. Pyshop use the requests library, so,
you just have to export your proxy in the environment variable `https_proxy`.
"""
import os
import os.path
import mimetypes
import logging
import tempfile
import tarfile
import zipfile
import shutil
import requests
from zope.interface import implementer
from pyramid.interfaces import ITemplateRenderer
from pyramid.exceptions import NotFound
# Module-level logger; configured by the hosting Pyramid application.
log = logging.getLogger(__name__)

# registering mimetype for egg files so guess_type() recognizes them
mimetypes.add_type('x-application/egg', '.egg')
mimetypes.add_type('x-application/whl', '.whl')
def build_whl(source, dest):
    """Unpack the sdist archive at `source`, run `bdist_wheel` on it and
    move the resulting wheel to `dest`.

    NOTE(review): this chdir()s the whole process and shells out with
    os.system(), whose exit status is ignored — presumably acceptable for a
    single-threaded server; confirm before using from worker threads.
    """
    tempdir = None
    olddir = os.path.abspath(os.curdir)
    try:
        tempdir = tempfile.mkdtemp(prefix='pyshop')
        # FIXME: .zip is not supported yet
        if source.endswith('.zip'):
            with zipfile.ZipFile(source, 'r') as arch:
                # Reject archives containing '..' components (path traversal).
                if [file for file in arch.namelist() if '..' in file]:
                    raise RuntimeError('Archive is not safe')
                arch.extractall(tempdir)
        else:
            arch = tarfile.open(source)
            try:
                if [file for file in arch.getnames() if '..' in file]:
                    raise RuntimeError('Archive is not safe')
                arch.extractall(tempdir)
            finally:
                arch.close()
        # The archive unpacks to a single top-level directory; build inside it.
        os.chdir(os.path.join(tempdir, os.listdir(tempdir)[0]))
        os.system('python setup.py bdist_wheel')
        distdir = os.path.join(tempdir, os.listdir(tempdir)[0], 'dist')
        wheel = os.path.join(distdir, os.listdir(distdir)[0])
        # XXX
        # As we already have serve a filename, we must respect it
        # if the archive is intended for build for a specific platform,
        # like Linux-x86_64 it will be renamed to "any" but only works
        # for Linux-x86_64.
        shutil.move(wheel, dest)
    finally:
        # Always restore the working directory and clean up the build tree.
        os.chdir(olddir)
        if tempdir:
            shutil.rmtree(tempdir)
@implementer(ITemplateRenderer)
class ReleaseFileRenderer(object):
    """Renderer that serve the python package"""

    def __init__(self, repository_root):
        # Local directory where mirrored packages are cached.
        self.repository_root = repository_root

    def __call__(self, value, system):
        # `value` describes the release file (filename, url, whlify flag...);
        # `system` is the Pyramid renderer system dict.
        if 'request' in system:
            request = system['request']

            filename = value['filename']
            mime, encoding = mimetypes.guess_type(filename)
            request.response.content_type = mime
            if encoding:
                request.response.encoding = encoding
            # Packages are sharded by the first letter of their filename.
            localpath = os.path.join(self.repository_root, filename[0].lower(),
                                     filename)
            if not os.path.exists(localpath):
                # Not cached yet: mirror it from upstream (PyPI).
                sdistpath = os.path.join(self.repository_root,
                                         value['original'][0].lower(),
                                         value['original'])
                dir_ = os.path.join(self.repository_root,
                                    value['original'][0].lower())
                if not os.path.exists(dir_):
                    os.makedirs(dir_, 0o750)
                # Only verify TLS certificates for https upstream URLs.
                verify = value['url'].startswith('https:')
                log.info('Downloading {0}'.format(value['url']))
                resp = requests.get(value['url'], verify=verify)
                if resp.status_code == 404:
                    raise NotFound('Resource {0} not found'.format(value['original']))
                resp.raise_for_status()
                with open(sdistpath, 'wb') as file:
                    file.write(resp.content)
                if not value['whlify']:
                    # Serve the sdist as-is; no wheel conversion requested.
                    return resp.content
                # Convert the sdist to a wheel at the requested filename.
                build_whl(sdistpath, localpath)
            data = b''
            with open(localpath, 'rb') as file:
                # NOTE(review): this inner `data = b''` is redundant — the
                # accumulator was already initialized just above.
                data = b''
                while True:
                    content = file.read(2 << 16)
                    if not content:
                        break
                    data += content
            return data
def renderer_factory(info):
    """
    Create the :class:`pyshop.helpers.download.ReleaseFileRenderer` factory.

    Packages are stored in the directory in the configuration key
    ``pyshop.repository`` of the paste ini file.
    """
    # `info` is the Pyramid RendererHelper passed by config.add_renderer().
    return ReleaseFileRenderer(info.settings['pyshop.repository'])
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']  # public API of this module
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class.

    Read more in the :ref:`User Guide <lda_qda>`.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes

    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``

    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.

    means_ : array-like, shape = [n_classes, n_features]
        Class means.

    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).

    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.

    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.

    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None, reg_param=0.0)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """

    def __init__(self, priors=None, reg_param=0.):
        # Priors are converted eagerly so fit() can use them as an array.
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param

    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.

        tol : float, optional, default 1.0e-4
            Threshold used for rank estimation.
        """
        X, y = check_X_y(X, y)
        # `y` becomes class indices 0..n_classes-1 after this.
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # Empirical priors from the class frequencies.
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # The per-class Gaussian is represented via the SVD of the
            # centered class data instead of an explicit covariance matrix:
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            S2 = (S ** 2) / (len(Xg) - 1)
            # Shrinkage regularization of the per-class variances.
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self

    def _decision_function(self, X):
        # Per-class log-posterior (up to a shared additive constant),
        # evaluated in each class's rotated, variance-scaled coordinates.
        check_is_fitted(self, 'classes_')

        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # Whiten: rotate then scale by 1/sqrt(variance) per axis.
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        # log|Sigma_k| term, computed from the stored variances.
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))

    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # Map the argmax class index back to the original label values.
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # Subtracting the row max keeps exp() numerically stable.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| bsd-3-clause |
kerstin/moviepy | moviepy/video/tools/tracking.py | 1 | 6088 | """
This module contains different functions for tracking objects in videos,
manually or automatically. The tracking functions return results under
the form: ``( txy, (fx,fy) )`` where txy is of the form [(ti, xi, yi)...]
and (fx(t),fy(t)) give the position of the track for all times t (if the
time t is out of the time bounds of the tracking time interval
fx and fy return the position of the object at the start or at the end
of the tracking time interval).
"""
from scipy.interpolate import interp1d
from ..io.preview import imdisplay
from .interpolators import Trajectory
from moviepy.decorators import (convert_to_seconds, use_clip_fps_by_default)
# Feature-detect OpenCV once at import; autoTrack() checks this flag and
# raises a helpful error instead of failing on an undefined `cv2`.
try:
    import cv2
    autotracking_possible = True
except:
    # Note: this will be later fixed with scipy/skimage replacements
    # but for the moment OpenCV is mandatory, so...
    autotracking_possible = False
# WE START WITH A TOOL FUNCTION
# MANUAL TRACKING
@convert_to_seconds(["t1","t2"])
@use_clip_fps_by_default
def manual_tracking(clip, t1=None, t2=None, fps=None, nobjects = 1,
                    savefile = None):
    """
    Allows manual tracking of an object(s) in the video clip between
    times `t1` and `t2`. This displays the clip frame by frame
    and you must click on the object(s) in each frame. If ``t2=None``
    only the frame at ``t1`` is taken into account.

    Returns a list [(t1,x1,y1),(t2,x2,y2) etc... ] if there is one
    object per frame, else returns a list whose elements are of the
    form (ti, [(xi1,yi1), (xi2,yi2), ...] )

    Parameters
    -------------

    t1,t2:
      times during which to track (defaults are start and
      end of the clip). t1 and t2 can be expressed in seconds
      like 15.35, in (min, sec), in (hour, min, sec), or as a
      string: '01:03:05.35'.
    fps:
      Number of frames per second to freeze on. If None, the clip's
      fps attribute is used instead.
    nobjects:
      Number of objects to click on each frame.
    savefile:
      If provided, the result is saved to a file, which makes
      it easier to edit and re-use later.

    Examples
    ---------

    >>> from moviepy.editor import VideoFileClip
    >>> from moviepy.video.tools.tracking import manual_tracking
    >>> clip = VideoFileClip("myvideo.mp4")
    >>> # manually indicate 3 trajectories, save them to a file
    >>> trajectories = manual_tracking(clip, t1=5, t2=7, fps=5,
                                       nobjects=3, savefile="track.txt")
    >>> # ...
    >>> # LATER, IN ANOTHER SCRIPT, RECOVER THESE TRAJECTORIES
    >>> from moviepy.video.tools.tracking import Trajectory
    >>> traj1, traj2, traj3 = Trajectory.load_list('track.txt')
    >>> # If ever you only have one object being tracked, recover it with
    >>> traj, = Trajectory.load_list('track.txt')

    """
    # Imported lazily so moviepy can be used without pygame installed.
    import pygame as pg

    screen = pg.display.set_mode(clip.size)
    step = 1.0 / fps
    if (t1 is None) and (t2 is None):
        t1,t2 = 0, clip.duration
    elif (t2 is None):
        # Single-frame mode: make the loop below run exactly once.
        t2 = t1 + step / 2
    t = t1
    txy_list = []

    def gatherClicks(t):
        # Show the frame at time t and block until `nobjects` clicks
        # are collected.  Backslash rewinds one frame, Escape aborts.
        imdisplay(clip.get_frame(t), screen)
        objects_to_click = nobjects
        clicks = []
        while objects_to_click:

            for event in pg.event.get():

                if event.type == pg.KEYDOWN:
                    if (event.key == pg.K_BACKSLASH):
                        return "return"
                    elif (event.key == pg.K_ESCAPE):
                        raise KeyboardInterrupt()

                elif event.type == pg.MOUSEBUTTONDOWN:
                    x, y = pg.mouse.get_pos()
                    clicks.append((x, y))
                    objects_to_click -= 1

        return clicks

    while t < t2:
        clicks =gatherClicks(t)
        if clicks == 'return':
            # Undo: drop the previous frame's clicks and step back.
            # NOTE(review): pressing backslash on the very first frame
            # pops an empty list and raises IndexError — confirm intended.
            txy_list.pop()
            t -= step
        else:
            txy_list.append((t,clicks))
            t += step

    # Transpose [(t, [clicks])] into one Trajectory per tracked object.
    tt, xylist = zip(*txy_list)
    result = []
    for i in range(nobjects):
        xys = [e[i] for e in xylist]
        xx, yy = zip(*xys)
        result.append(Trajectory(tt, xx, yy))

    if savefile is not None:
        Trajectory.save_list(result, savefile)
    return result
# AUTOMATED TRACKING OF A PATTERN
def findAround(pic, pat, xy=None, r=None):
    """
    Find image pattern ``pat`` in ``pic[x +/- r, y +/- r]``.
    If xy is None, consider the whole picture.

    Returns the (x, y) position of the best match, expressed in the full
    picture's coordinate frame.
    """
    # Bug fix: this module never imports numpy at top level, so the np.*
    # call below raised NameError; import it locally.
    import numpy as np

    if xy and r:
        h, w = pat.shape[:2]
        x, y = xy
        pic = pic[y - r: y + h + r, x - r: x + w + r]
    # Bug fix: cv2.matchTemplate(image, templ, method) expects the big
    # picture first and the small pattern second; the original call had
    # them swapped, which OpenCV rejects whenever pic is larger than pat.
    matches = cv2.matchTemplate(pic, pat, cv2.TM_CCOEFF_NORMED)
    yf, xf = np.unravel_index(matches.argmax(), matches.shape)
    return (x - r + xf, y - r + yf) if (xy and r) else (xf, yf)
def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):
    """
    Tracks a given pattern (small image array) in a video clip.
    Returns [(x1,y1),(x2,y2)...] where xi,yi are
    the coordinates of the pattern in the clip on frame i.
    To select the frames you can either specify a list of times with ``tt``
    or select a frame rate with ``fps``.

    This algorithm assumes that the pattern's aspect does not vary much
    and that the distance between two occurences of the pattern in
    two consecutive frames is smaller than ``radius`` (if you set ``radius``
    to -1 the pattern will be searched in the whole screen at each frame).
    You can also provide the original position of the pattern with xy0.
    """
    if not autotracking_possible:
        raise IOError("Sorry, autotrack requires OpenCV for the moment. "
                      "Install OpenCV (aka cv2) to use it.")

    # numpy is not imported at module level; bring it in here.
    import numpy as np

    # Bug fix: build the sample-time list BEFORE it is first used — the
    # original read tt[0] below while tt could still be None, crashing
    # whenever xy0 was not supplied.
    if tt is None:
        tt = np.arange(0, clip.duration, 1.0 / fps)

    if not xy0:
        xy0 = findAround(clip.get_frame(tt[0]), pattern)

    xys = [xy0]
    for t in tt[1:]:
        # Search only around the previous position to stay fast and robust.
        xys.append(findAround(clip.get_frame(t), pattern,
                              xy=xys[-1], r=radius))
    xx, yy = zip(*xys)

    return Trajectory(tt, xx, yy)
| mit |
tysonclugg/django | django/db/backends/postgresql/introspection.py | 16 | 10779 | import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.deprecation import RemovedInDjango21Warning
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Maps type codes to Django Field types.
    # Keys are PostgreSQL type OIDs (pg_type.oid).
    data_types_reverse = {
        16: 'BooleanField',
        17: 'BinaryField',
        20: 'BigIntegerField',
        21: 'SmallIntegerField',
        23: 'IntegerField',
        25: 'TextField',
        700: 'FloatField',
        701: 'FloatField',
        869: 'GenericIPAddressField',
        1042: 'CharField',  # blank-padded
        1043: 'CharField',
        1082: 'DateField',
        1083: 'TimeField',
        1114: 'DateTimeField',
        1184: 'DateTimeField',
        1266: 'TimeField',
        1700: 'DecimalField',
        2950: 'UUIDField',
    }

    # Table names that introspection should pretend do not exist.
    ignored_tables = []

    # Each row is (first column name, indkey array, is-unique, is-primary);
    # indkey holds the 1-based column numbers covered by the index.
    _get_indexes_query = """
        SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
        WHERE c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND attr.attrelid = c.oid
            AND attr.attnum = idx.indkey[0]
            AND c.relname = %s"""
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.default and 'nextval' in description.default:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # As cursor.description does not return reliably the nullable property,
        # we have to query the information_schema (#7783)
        cursor.execute("""
            SELECT column_name, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = %s""", [table_name])
        # column name -> (is_nullable, default expression)
        field_map = {line[0]: line[1:] for line in cursor.fetchall()}
        # LIMIT 1 query only to populate cursor.description cheaply.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        # Extend each 6-tuple from cursor.description with the null_ok and
        # default values looked up above.
        return [
            FieldInfo(*(line[0:6] + (field_map[line.name][0] == 'YES', field_map[line.name][1])))
            for line in cursor.description
        ]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
    def get_indexes(self, cursor, table_name):
        """Return {column_name: {'primary_key': bool, 'unique': bool}} for
        every single-column index on the table.

        Deprecated; get_constraints() supersedes this and also covers
        multi-column and expression indexes.
        """
        warnings.warn(
            "get_indexes() is deprecated in favor of get_constraints().",
            RemovedInDjango21Warning, stacklevel=2
        )
        # This query retrieves each index on the given table, including the
        # first associated field name
        # (self._get_indexes_query is inherited; from the usage below its
        # rows appear to be (column, indkey, unique, primary) -- TODO
        # confirm against the base introspection class.)
        cursor.execute(self._get_indexes_query, [table_name])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            if row[0] not in indexes:
                indexes[row[0]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[3]:
                indexes[row[0]]['primary_key'] = True
            if row[2]:
                indexes[row[0]]['unique'] = True
        return indexes
    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns. Also retrieve the definition of expression-based
        indexes.
        """
        constraints = {}
        # Loop over the key table, collecting things as constraints. The column
        # array must return column names in the same order in which they were
        # created.
        # The subquery containing generate_series can be replaced with
        # "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped.
        cursor.execute("""
            SELECT
                c.conname,
                array(
                    SELECT attname
                    FROM (
                        SELECT unnest(c.conkey) AS colid,
                            generate_series(1, array_length(c.conkey, 1)) AS arridx
                    ) AS cols
                    JOIN pg_attribute AS ca ON cols.colid = ca.attnum
                    WHERE ca.attrelid = c.conrelid
                    ORDER BY cols.arridx
                ),
                c.contype,
                (SELECT fkc.relname || '.' || fka.attname
                FROM pg_attribute AS fka
                JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
                WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
                cl.reloptions
            FROM pg_constraint AS c
            JOIN pg_class AS cl ON c.conrelid = cl.oid
            JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid
            WHERE ns.nspname = %s AND cl.relname = %s
        """, ["public", table_name])
        # contype codes: 'p' primary key, 'u' unique, 'f' foreign key,
        # 'c' check.  used_cols is "other_table.other_column" for FKs.
        for constraint, columns, kind, used_cols, options in cursor.fetchall():
            constraints[constraint] = {
                "columns": columns,
                "primary_key": kind == "p",
                "unique": kind in ["p", "u"],
                "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
                "check": kind == "c",
                "index": False,
                "definition": None,
                "options": options,
            }
        # Now get indexes
        # The row_number() function for ordering the index fields can be
        # replaced by WITH ORDINALITY in the unnest() functions when support
        # for PostgreSQL 9.3 is dropped.
        cursor.execute("""
            SELECT
                indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary,
                array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions
            FROM (
                SELECT
                    row_number() OVER () as rnum, c2.relname as indexname,
                    idx.*, attr.attname, am.amname,
                    CASE
                        WHEN idx.indexprs IS NOT NULL THEN
                            pg_get_indexdef(idx.indexrelid)
                    END AS exprdef,
                    CASE am.amname
                        WHEN 'btree' THEN
                            CASE (option & 1)
                                WHEN 1 THEN 'DESC' ELSE 'ASC'
                            END
                    END as ordering,
                    c2.reloptions as attoptions
                FROM (
                    SELECT
                        *, unnest(i.indkey) as key, unnest(i.indoption) as option
                    FROM pg_index i
                ) idx
                LEFT JOIN pg_class c ON idx.indrelid = c.oid
                LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
                LEFT JOIN pg_am am ON c2.relam = am.oid
                LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
                WHERE c.relname = %s
            ) s2
            GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
        """, [table_name])
        # Indexes that back a constraint found above keep their constraint
        # entry; only standalone indexes are added here.  [None] columns
        # mean an expression index (definition carries the full DDL).
        for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall():
            if index not in constraints:
                constraints[index] = {
                    "columns": columns if columns != [None] else [],
                    "orders": orders if orders != [None] else [],
                    "primary_key": primary,
                    "unique": unique,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                    # Index is imported at module level (not visible here).
                    "type": Index.suffix if type_ == 'btree' else type_,
                    "definition": definition,
                    "options": options,
                }
        return constraints
| bsd-3-clause |
40223138/2015cd_midterm | gear.py | 204 | 19237 | #@+leo-ver=5-thin
#@+node:office.20150407074720.1: * @file gear.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:office.20150407074720.2: ** <<declarations>> (application)
#@@language python
import cherrypy
import os
import sys
# 這個程式要計算正齒輪的齒面寬, 資料庫連結希望使用 pybean 與 SQLite
# 導入 pybean 模組與所要使用的 Store 及 SQLiteWriter 方法
from pybean import Store, SQLiteWriter
import math
# Determine the directory containing this program file (on Windows the
# result keeps a trailing backslash).
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Add that directory to the module search path.
sys.path.append(_curdir)
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # while program is executed in OpenShift
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # while program is executed in localhost
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"
# 這是 Gear 設計資料表的定義
'''
lewis.db 中有兩個資料表, steel 與 lewis
CREATE TABLE steel (
serialno INTEGER,
unsno TEXT,
aisino TEXT,
treatment TEXT,
yield_str INTEGER,
tensile_str INTEGER,
stretch_ratio INTEGER,
sectional_shr INTEGER,
brinell INTEGER
);
CREATE TABLE lewis (
serialno INTEGER PRIMARY KEY
NOT NULL,
gearno INTEGER,
type1 NUMERIC,
type4 NUMERIC,
type3 NUMERIC,
type2 NUMERIC
);
'''
#@-<<declarations>>
#@+others
#@+node:office.20150407074720.3: ** class Gear
class Gear(object):
#@+others
#@+node:office.20150407074720.4: *3* __init__
def __init__(self):
# hope to create downloads and images directories
if not os.path.isdir(download_root_dir+"downloads"):
try:
os.makedirs(download_root_dir+"downloads")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"images"):
try:
os.makedirs(download_root_dir+"images")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"tmp"):
try:
os.makedirs(download_root_dir+"tmp")
except:
print("mkdir error")
#@+node:office.20150407074720.5: *3* default
@cherrypy.expose
    def default(self, attr='default', *args, **kwargs):
        # Catch-all handler: any URL that matches no other exposed method
        # is redirected back to the site root.
        raise cherrypy.HTTPRedirect("/")
#@+node:office.20150407074720.6: *3* index
# 各組利用 index 引導隨後的程式執行
@cherrypy.expose
    def index(self, *args, **kwargs):
        """Render the gear-design input form, listing materials from lewis.db."""
        # Connect to the database file and pull out all material rows.
        try:
            # Store maps the database file; frozen=True forbids creating
            # new tables dynamically.
            # The program runs from the application directory, so lewis.db
            # is referenced relative to it.
            SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
            #material = SQLite連結.find_one("steel","serialno = ?",[序號])
            # str(SQLite連結.count("steel")) returns 70, i.e. the steel
            # table holds 70 rows.
            material = SQLite連結.find("steel")
            # the returned material is an iterator
            '''
            outstring = ""
            for material_item in material:
                outstring += str(material_item.serialno) + ":" + material_item.unsno + "_" + material_item.treatment + "<br />"
            return outstring
            '''
        except:
            # NOTE(review): bare except hides the real failure; narrowing it
            # would change behavior, so it is only flagged here.
            return "抱歉! 資料庫無法連線<br />"
        # The form posts to gear_width(); field names must match its
        # parameter names.
        outstring = '''
<form id=entry method=post action="gear_width">
請填妥下列參數,以完成適當的齒尺寸大小設計。<br />
馬達馬力:<input type=text name=horsepower id=horsepower value=100 size=10>horse power<br />
馬達轉速:<input type=text name=rpm id=rpm value=1120 size=10>rpm<br />
齒輪減速比: <input type=text name=ratio id=ratio value=4 size=10><br />
齒形:<select name=toothtype id=toothtype>
<option value=type1>壓力角20度,a=0.8,b=1.0
<option value=type2>壓力角20度,a=1.0,b=1.25
<option value=type3>壓力角25度,a=1.0,b=1.25
<option value=type4>壓力角25度,a=1.0,b=1.35
</select><br />
安全係數:<input type=text name=safetyfactor id=safetyfactor value=3 size=10><br />
齒輪材質:<select name=material_serialno id=material_serialno>
'''
        for material_item in material:
            outstring += "<option value=" + str(material_item.serialno) + ">UNS - " + \
            material_item.unsno + " - " + material_item.treatment
        outstring += "</select><br />"
        outstring += "小齒輪齒數:<input type=text name=npinion id=npinion value=18 size=10><br />"
        outstring += "<input type=submit id=submit value=進行運算>"
        outstring += "</form>"
        return outstring
#@+node:office.20150407074720.7: *3* interpolation
@cherrypy.expose
def interpolation(self, small_gear_no=18, gear_type=1):
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
# 使用內插法求值
# 找出比目標齒數大的其中的最小的,就是最鄰近的大值
lewis_factor = SQLite連結.find_one("lewis","gearno > ?",[small_gear_no])
if(gear_type == 1):
larger_formfactor = lewis_factor.type1
elif(gear_type == 2):
larger_formfactor = lewis_factor.type2
elif(gear_type == 3):
larger_formfactor = lewis_factor.type3
else:
larger_formfactor = lewis_factor.type4
larger_toothnumber = lewis_factor.gearno
# 找出比目標齒數小的其中的最大的,就是最鄰近的小值
lewis_factor = SQLite連結.find_one("lewis","gearno < ? order by gearno DESC",[small_gear_no])
if(gear_type == 1):
smaller_formfactor = lewis_factor.type1
elif(gear_type == 2):
smaller_formfactor = lewis_factor.type2
elif(gear_type == 3):
smaller_formfactor = lewis_factor.type3
else:
smaller_formfactor = lewis_factor.type4
smaller_toothnumber = lewis_factor.gearno
calculated_factor = larger_formfactor + (small_gear_no - larger_toothnumber) * (larger_formfactor - smaller_formfactor) / (larger_toothnumber - smaller_toothnumber)
# 只傳回小數點後五位數
return str(round(calculated_factor, 5))
#@+node:office.20150407074720.8: *3* gear_width
# 改寫為齒面寬的設計函式
@cherrypy.expose
def gear_width(self, horsepower=100, rpm=1000, ratio=4, toothtype=1, safetyfactor=2, material_serialno=1, npinion=18):
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
outstring = ""
# 根據所選用的齒形決定壓力角
if(toothtype == 1 or toothtype == 2):
壓力角 = 20
else:
壓力角 = 25
# 根據壓力角決定最小齒數
if(壓力角== 20):
最小齒數 = 18
else:
最小齒數 = 12
# 直接設最小齒數
if int(npinion) <= 最小齒數:
npinion = 最小齒數
# 大於400的齒數則視為齒條(Rack)
if int(npinion) >= 400:
npinion = 400
# 根據所選用的材料查詢強度值
# 由 material之序號查 steel 表以得材料之降伏強度S單位為 kpsi 因此查得的值要成乘上1000
# 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立
#SQLite連結 = Store(SQLiteWriter("lewis.db", frozen=True))
# 指定 steel 資料表
steel = SQLite連結.new("steel")
# 資料查詢
#material = SQLite連結.find_one("steel","unsno=? and treatment=?",[unsno, treatment])
material = SQLite連結.find_one("steel","serialno=?",[material_serialno])
# 列出 steel 資料表中的資料筆數
#print(SQLite連結.count("steel"))
#print (material.yield_str)
strengthstress = material.yield_str*1000
# 由小齒輪的齒數與齒形類別,查詢lewis form factor
# 先查驗是否有直接對應值
on_table = SQLite連結.count("lewis","gearno=?",[npinion])
if on_table == 1:
# 直接進入設計運算
#print("直接運算")
#print(on_table)
lewis_factor = SQLite連結.find_one("lewis","gearno=?",[npinion])
#print(lewis_factor.type1)
# 根據齒形查出 formfactor 值
if(toothtype == 1):
formfactor = lewis_factor.type1
elif(toothtype == 2):
formfactor = lewis_factor.type2
elif(toothtype == 3):
formfactor = lewis_factor.type3
else:
formfactor = lewis_factor.type4
else:
# 沒有直接對應值, 必須進行查表內插運算後, 再執行設計運算
#print("必須內插")
#print(interpolation(npinion, gear_type))
formfactor = self.interpolation(npinion, toothtype)
# 開始進行設計運算
ngear = int(npinion) * int(ratio)
# 重要的最佳化設計---儘量用整數的diametralpitch
# 先嘗試用整數算若 diametralpitch 找到100 仍無所獲則改用 0.25 作為增量再不行則宣告 fail
counter = 0
i = 0.1
facewidth = 0
circularpitch = 0
while (facewidth <= 3 * circularpitch or facewidth >= 5 * circularpitch):
diametralpitch = i
#circularpitch = 3.14159/diametralpitch
circularpitch = math.pi/diametralpitch
pitchdiameter = int(npinion)/diametralpitch
#pitchlinevelocity = 3.14159*pitchdiameter*rpm/12
pitchlinevelocity = math.pi*pitchdiameter * float(rpm)/12
transmittedload = 33000*float(horsepower)/pitchlinevelocity
velocityfactor = 1200/(1200 + pitchlinevelocity)
# formfactor is Lewis form factor
# formfactor need to get from table 13-3 and determined ty teeth number and type of tooth
# formfactor = 0.293
# 90 is the value get from table corresponding to material type
facewidth = transmittedload*diametralpitch*float(safetyfactor)/velocityfactor/formfactor/strengthstress
if(counter>5000):
outstring += "超過5000次的設計運算,仍無法找到答案!<br />"
outstring += "可能所選用的傳遞功率過大,或無足夠強度的材料可以使用!<br />"
# 離開while迴圈
break
i += 0.1
counter += 1
facewidth = round(facewidth, 4)
if(counter<5000):
# 先載入 cube 程式測試
#outstring = self.cube_weblink()
# 再載入 gear 程式測試
outstring = self.gear_weblink()
outstring += "進行"+str(counter)+"次重複運算後,得到合用的facewidth值為:"+str(facewidth)
return outstring
#@+node:office.20150407074720.9: *3* cube_weblink
@cherrypy.expose
    def cube_weblink(self):
        """Return an HTML/JS fragment driving Pro/E via Pro/Web.Link.

        The embedded JavaScript opens cube.prt, sweeps the a1/a2 model
        parameters over six iterations, regenerates the part, reads its
        volume and saves each variant as cube<N>.prt under v:/tmp.
        """
        # The string below is emitted verbatim into the response page and
        # executes in the browser, not on this server.
        outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">
document.writeln ("Error loading Pro/Web.Link header!");
</script>
<script type="text/javascript" language="JavaScript">
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt"));
var solid = session.GetModel("cube.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("a1");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("a2");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=20.0;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
'''
        return outstring
#@+node:office.20150407074720.10: *3* gear_weblink
@cherrypy.expose
    def gear_weblink(self, facewidth=5, n=18):
        """Return an HTML/JS fragment rebuilding gear.prt via Pro/Web.Link.

        The embedded JavaScript opens gear.prt, sweeps the n/face_width
        model parameters over six iterations, regenerates the part, reads
        its volume and saves each variant as mygear_<N>.prt.

        NOTE(review): the ``facewidth`` and ``n`` arguments are accepted
        but never used -- the script hard-codes myf = 25 + i*2 and
        myn = 10.0 + i*0.5.  Confirm whether the caller's computed values
        were meant to drive the model parameters.
        """
        # Emitted verbatim into the response page; runs in the browser.
        outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">// <![CDATA[
document.writeln ("Error loading Pro/Web.Link header!");
// ]]></script>
<script type="text/javascript" language="JavaScript">// <![CDATA[
if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("gear.prt", "v:/", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("gear.prt"));
var solid = session.GetModel("gear.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("n");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("face_width");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=25+i*2;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
//d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d1Value = pfcCreate ("MpfcModelItem").CreateIntParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("gear.prt", "v:/", "mygear_"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
// ]]></script>
'''
        return outstring
#@-others
#@-others
root = Gear()
# setup static, images and downloads directories
application_conf = {
    '/static':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': _curdir+"/static"},
    '/images':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/images"},
    '/downloads':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/downloads"}
}
# if inOpenshift ('OPENSHIFT_REPO_DIR' exists in environment variables) or not inOpenshift
if __name__ == '__main__':
    if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
        # operate in OpenShift
        # NOTE(review): `application` is only bound when run as a script;
        # a WSGI server importing this module will not see it -- confirm
        # the deployment entry point.
        application = cherrypy.Application(root, config = application_conf)
    else:
        # operate in localhost
        cherrypy.quickstart(root, config = application_conf)
#@-leo
| gpl-3.0 |
jiwanlimbu/aura | keystone/tests/unit/fakeldap.py | 1 | 24274 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness.
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
library to work with keystone.
"""
import random
import re
import shelve
import ldap
from oslo_log import log
import six
from six import moves
import keystone.conf
from keystone import exception
from keystone.identity.backends.ldap import common
SCOPE_NAMES = {
ldap.SCOPE_BASE: 'SCOPE_BASE',
ldap.SCOPE_ONELEVEL: 'SCOPE_ONELEVEL',
ldap.SCOPE_SUBTREE: 'SCOPE_SUBTREE',
}
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa366991(v=vs.85).aspx # noqa
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LOG = log.getLogger(__name__)
CONF = keystone.conf.CONF
def _internal_attr(attr_name, value_or_values):
    # Canonicalize an attribute value (or list of values) the way a real
    # LDAP server would, always returning a *list* of unicode strings.
    # DN-valued attributes (member, roleOccupant) additionally get their
    # RDN attribute names upper-cased.
    def normalize_value(value):
        return common.utf8_decode(value)
    def normalize_dn(dn):
        # Capitalize the attribute names as an LDAP server might.
        # NOTE(blk-u): Special case for this tested value, used with
        # test_user_id_comma. The call to str2dn here isn't always correct
        # here, because `dn` is escaped for an LDAP filter. str2dn() normally
        # works only because there's no special characters in `dn`.
        if dn == 'cn=Doe\\5c, John,ou=Users,cn=example,cn=com':
            return 'CN=Doe\\, John,OU=Users,CN=example,CN=com'
        # NOTE(blk-u): Another special case for this tested value. When a
        # roleOccupant has an escaped comma, it gets converted to \2C.
        if dn == 'cn=Doe\\, John,ou=Users,cn=example,cn=com':
            return 'CN=Doe\\2C John,OU=Users,CN=example,CN=com'
        try:
            dn = ldap.dn.str2dn(common.utf8_encode(dn))
        except ldap.DECODING_ERROR:
            # NOTE(amakarov): In case of IDs instead of DNs in group members
            # they must be handled as regular values.
            return normalize_value(dn)
        norm = []
        for part in dn:
            name, val, i = part[0]
            name = common.utf8_decode(name)
            name = name.upper()
            norm.append([(name, val, i)])
        return common.utf8_decode(ldap.dn.dn2str(norm))
    if attr_name in ('member', 'roleOccupant'):
        attr_fn = normalize_dn
    else:
        attr_fn = normalize_value
    # Single values are wrapped so callers can always iterate the result.
    if isinstance(value_or_values, list):
        return [attr_fn(x) for x in value_or_values]
    return [attr_fn(value_or_values)]
def _match_query(query, attrs, attrs_checked):
    """Match an ldap query to an attribute dictionary.
    The characters &, |, and ! are supported in the query. No syntax checking
    is performed, so malformed queries will not work correctly.

    ``attrs_checked`` is mutated: every attribute name tested by a leaf
    comparison is added to it (lower-cased), so callers can verify e.g.
    that a filter mentioned objectclass.
    """
    # cut off the parentheses
    inner = query[1:-1]
    if inner.startswith(('&', '|')):
        # '&' requires every subgroup to match, '|' requires any.
        if inner[0] == '&':
            matchfn = all
        else:
            matchfn = any
        # cut off the & or |
        groups = _paren_groups(inner[1:])
        return matchfn(_match_query(group, attrs, attrs_checked)
                       for group in groups)
    if inner.startswith('!'):
        # cut off the ! and the nested parentheses
        return not _match_query(query[2:-1], attrs, attrs_checked)
    # Leaf comparison: "(key=value)".
    (k, _sep, v) = inner.partition('=')
    attrs_checked.add(k.lower())
    return _match(k, v, attrs)
def _paren_groups(source):
"""Split a string into parenthesized groups."""
count = 0
start = 0
result = []
for pos in moves.range(len(source)):
if source[pos] == '(':
if count == 0:
start = pos
count += 1
if source[pos] == ')':
count -= 1
if count == 0:
result.append(source[start:pos + 1])
return result
def _match(key, value, attrs):
    """Match a given key and value against an attribute list.

    Supports '*' as a full wildcard, plus leading/trailing '*' for
    substring, suffix and prefix matches.  objectclass comparisons also
    accept the hard-coded subclasses from _subs().
    """
    def match_with_wildcards(norm_val, val_list):
        # Case insensitive checking with wildcards
        if norm_val.startswith('*'):
            if norm_val.endswith('*'):
                # Is the string anywhere in the target?
                for x in val_list:
                    if norm_val[1:-1] in x:
                        return True
            else:
                # Is the string at the end of the target?
                for x in val_list:
                    if (norm_val[1:] ==
                            x[len(x) - len(norm_val) + 1:]):
                        return True
        elif norm_val.endswith('*'):
            # Is the string at the start of the target?
            for x in val_list:
                if norm_val[:-1] == x[:len(norm_val) - 1]:
                    return True
        else:
            # Is the string an exact match?
            # NOTE(review): this reads `check_value` from the enclosing
            # scope rather than the `norm_val` parameter; it works because
            # the only caller passes check_value in, but it is fragile.
            for x in val_list:
                if check_value == x:
                    return True
        return False
    if key not in attrs:
        return False
    # This is a pure wild card search, so the answer must be yes!
    if value == '*':
        return True
    if key == 'serviceId':
        # for serviceId, the backend is returning a list of numbers
        # make sure we convert them to strings first before comparing
        # them
        str_sids = [six.text_type(x) for x in attrs[key]]
        return six.text_type(value) in str_sids
    if key != 'objectclass':
        # Normalize both sides through _internal_attr for a
        # case-insensitive comparison.
        check_value = _internal_attr(key, value)[0].lower()
        norm_values = list(
            _internal_attr(key, x)[0].lower() for x in attrs[key])
        return match_with_wildcards(check_value, norm_values)
    # it is an objectclass check, so check subclasses
    values = _subs(value)
    for v in values:
        if v in attrs[key]:
            return True
    return False
def _subs(value):
"""Return a list of subclass strings.
The strings represent the ldap objectclass plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.
"""
subs = {'groupOfNames': ['keystoneTenant',
'keystoneRole',
'keystoneTenantRole']}
if value in subs:
return [value] + subs[value]
return [value]
server_fail = False
class FakeShelve(dict):
    """Dict-backed stand-in for a ``shelve`` shelf (fake://memory URLs)."""

    def sync(self):
        """No-op: a real shelf flushes to disk, a dict has nothing to flush."""
FakeShelves = {}
PendingRequests = {}
class FakeLdap(common.LDAPHandler):
"""Emulate the python-ldap API.
The python-ldap API requires all strings to be UTF-8 encoded. This
is assured by the caller of this interface
(i.e. KeystoneLDAPHandler).
However, internally this emulation MUST process and store strings
in a canonical form which permits operations on
characters. Encoded strings do not provide the ability to operate
on characters. Therefore this emulation accepts UTF-8 encoded
strings, decodes them to unicode for operations internal to this
emulation, and encodes them back to UTF-8 when returning values
from the emulation.
"""
__prefix = 'ldap:'
    def __init__(self, conn=None):
        super(FakeLdap, self).__init__(conn=conn)
        # Per-connection LDAP option store; DEREF defaults to NEVER just
        # like a real client connection.
        self._ldap_options = {ldap.OPT_DEREF: ldap.DEREF_NEVER}
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None,
                conn_timeout=None):
        """Attach a backing store for *url* and record connection options.

        fake://memory URLs share one in-process FakeShelve per URL (so
        separate handler instances see the same data); any other URL is
        treated as a filesystem path and opened with the real ``shelve``
        module.
        """
        if url.startswith('fake://memory'):
            if url not in FakeShelves:
                FakeShelves[url] = FakeShelve()
            self.db = FakeShelves[url]
        else:
            # Strip the 7-character "fake://" scheme prefix.
            self.db = shelve.open(url[7:])
        using_ldaps = url.lower().startswith("ldaps")
        if use_tls and using_ldaps:
            raise AssertionError('Invalid TLS / LDAPS combination')
        # TLS options are forwarded to the real ldap module globals, same
        # as the production code path would.
        if use_tls:
            if tls_cacertfile:
                ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
            elif tls_cacertdir:
                ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
            if tls_req_cert in list(common.LDAP_TLS_CERTS.values()):
                ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
            else:
                # NOTE(review): the %s here is never interpolated --
                # ValueError receives two args instead of a formatted
                # message; flagged only, as fixing changes the message.
                raise ValueError("invalid TLS_REQUIRE_CERT tls_req_cert=%s",
                                 tls_req_cert)
        if alias_dereferencing is not None:
            self.set_option(ldap.OPT_DEREF, alias_dereferencing)
        self.page_size = page_size
        # Pool-related settings are stored but otherwise unused by the fake.
        self.use_pool = use_pool
        self.pool_size = pool_size
        self.pool_retry_max = pool_retry_max
        self.pool_retry_delay = pool_retry_delay
        self.pool_conn_timeout = pool_conn_timeout
        self.pool_conn_lifetime = pool_conn_lifetime
        self.conn_timeout = conn_timeout
    def dn(self, dn):
        # DNs are stored decoded; normalize any bytes input to unicode.
        return common.utf8_decode(dn)
    def _dn_to_id_attr(self, dn):
        # Attribute name of the first RDN, e.g. 'cn' for 'cn=x,ou=y'.
        return common.utf8_decode(
            ldap.dn.str2dn(common.utf8_encode(dn))[0][0][0])
    def _dn_to_id_value(self, dn):
        # Attribute value of the first RDN, e.g. 'x' for 'cn=x,ou=y'.
        return common.utf8_decode(
            ldap.dn.str2dn(common.utf8_encode(dn))[0][0][1])
    def key(self, dn):
        # Storage key in the backing shelf: 'ldap:' prefix + normalized DN.
        return '%s%s' % (self.__prefix, self.dn(dn))
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        """Emulate a simple bind.

        Accepts the admin identities ('cn=Admin' or the configured LDAP
        user) with 'password' or the configured password; otherwise looks
        the DN up in the store and compares userPassword.  An unknown DN
        binds anonymously, matching real server behavior.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        whos = ['cn=Admin', CONF.ldap.user]
        if (common.utf8_decode(who) in whos and
                common.utf8_decode(cred) in ['password', CONF.ldap.password]):
            return
        attrs = self.db.get(self.key(who))
        if not attrs:
            LOG.debug('who=%s not found, binding anonymously',
                      common.utf8_decode(who))
        db_password = ''
        if attrs:
            try:
                db_password = attrs['userPassword'][0]
            except (KeyError, IndexError):
                LOG.debug('bind fail: password for who=%s not found',
                          common.utf8_decode(who))
                raise ldap.INAPPROPRIATE_AUTH
        if cred != common.utf8_encode(db_password):
            LOG.debug('bind fail: password for who=%s does not match',
                      common.utf8_decode(who))
            raise ldap.INVALID_CREDENTIALS
    def unbind_s(self):
        """Provide for compatibility but this method is ignored."""
        if server_fail:
            raise ldap.SERVER_DOWN
    def add_s(self, dn, modlist):
        """Add an object with the specified attributes at dn.

        Raises NAMING_VIOLATION unless the modlist repeats the DN's first
        RDN attribute/value pair, and ALREADY_EXISTS for a duplicate DN --
        both mirror real server behavior.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        id_attr_in_modlist = False
        id_attr = self._dn_to_id_attr(dn)
        id_value = self._dn_to_id_value(dn)
        # The LDAP API raises a TypeError if attr name is None.
        for k, dummy_v in modlist:
            if k is None:
                raise TypeError('must be string, not None. modlist=%s' %
                                modlist)
            if k == id_attr:
                for val in dummy_v:
                    if common.utf8_decode(val) == id_value:
                        id_attr_in_modlist = True
        if not id_attr_in_modlist:
            LOG.debug('id_attribute=%(attr)s missing, attributes=%(attrs)s' %
                      {'attr': id_attr, 'attrs': modlist})
            raise ldap.NAMING_VIOLATION
        key = self.key(dn)
        LOG.debug('add item: dn=%(dn)s, attrs=%(attrs)s', {
            'dn': common.utf8_decode(dn), 'attrs': modlist})
        if key in self.db:
            LOG.debug('add item failed: dn=%s is already in store.',
                      common.utf8_decode(dn))
            raise ldap.ALREADY_EXISTS(dn)
        # Values are canonicalized exactly once, at write time.
        self.db[key] = {k: _internal_attr(k, v) for k, v in modlist}
        self.db.sync()
    def delete_s(self, dn):
        """Remove the ldap object at specified dn."""
        # Delegates to delete_ext_s with no server controls.
        return self.delete_ext_s(dn, serverctrls=[])
    def _getChildren(self, dn):
        # Store keys of every entry strictly below *dn*, at any depth
        # (any key matching 'ldap:<something>,<dn>').
        return [k for k, v in self.db.items()
                if re.match('%s.*,%s' % (
                    re.escape(self.__prefix),
                    re.escape(self.dn(dn))), k)]
    def delete_ext_s(self, dn, serverctrls, clientctrls=None):
        """Remove the ldap object at specified dn.

        Raises NO_SUCH_OBJECT when the DN is not in the store; the
        serverctrls/clientctrls arguments are accepted but ignored.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        try:
            key = self.key(dn)
            LOG.debug('FakeLdap delete item: dn=%s', common.utf8_decode(dn))
            del self.db[key]
        except KeyError:
            LOG.debug('delete item failed: dn=%s not found.',
                      common.utf8_decode(dn))
            raise ldap.NO_SUCH_OBJECT
        self.db.sync()
    def modify_s(self, dn, modlist):
        """Modify the object at dn using the attribute list.
        :param dn: an LDAP DN
        :param modlist: a list of tuples in the following form:
            ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)

        Raises NO_SUCH_OBJECT for an unknown DN, TYPE_OR_VALUE_EXISTS when
        adding a duplicate value, and NO_SUCH_ATTRIBUTE when deleting a
        value or attribute that is not present.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        key = self.key(dn)
        LOG.debug('modify item: dn=%(dn)s attrs=%(attrs)s', {
            'dn': common.utf8_decode(dn), 'attrs': modlist})
        try:
            entry = self.db[key]
        except KeyError:
            LOG.debug('modify item failed: dn=%s not found.',
                      common.utf8_decode(dn))
            raise ldap.NO_SUCH_OBJECT
        for cmd, k, v in modlist:
            values = entry.setdefault(k, [])
            if cmd == ldap.MOD_ADD:
                v = _internal_attr(k, v)
                for x in v:
                    if x in values:
                        raise ldap.TYPE_OR_VALUE_EXISTS
                values += v
            elif cmd == ldap.MOD_REPLACE:
                # In-place slice assignment keeps the list object shared
                # with the entry dict.
                values[:] = _internal_attr(k, v)
            elif cmd == ldap.MOD_DELETE:
                if v is None:
                    # A None value means "remove the whole attribute".
                    if not values:
                        LOG.debug('modify item failed: '
                                  'item has no attribute "%s" to delete', k)
                        raise ldap.NO_SUCH_ATTRIBUTE
                    values[:] = []
                else:
                    for val in _internal_attr(k, v):
                        try:
                            values.remove(val)
                        except ValueError:
                            LOG.debug('modify item failed: '
                                      'item has no attribute "%(k)s" with '
                                      'value "%(v)s" to delete', {
                                          'k': k, 'v': val})
                            raise ldap.NO_SUCH_ATTRIBUTE
            else:
                LOG.debug('modify item failed: unknown command %s', cmd)
                raise NotImplementedError('modify_s action %s not'
                                          ' implemented' % cmd)
        self.db[key] = entry
        self.db.sync()
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        """Search for all matching objects under base using the query.
        Args:
        base -- dn to search under
        scope -- search scope (base, subtree, onelevel)
        filterstr -- filter objects by
        attrlist -- attrs to return. Returns all attrs if not specified

        Returns a list of (dn, attrs) tuples.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        if (not filterstr) and (scope != ldap.SCOPE_BASE):
            raise AssertionError('Search without filter on onelevel or '
                                 'subtree scope')
        if scope == ldap.SCOPE_BASE:
            try:
                item_dict = self.db[self.key(base)]
            except KeyError:
                LOG.debug('search fail: dn not found for SCOPE_BASE')
                raise ldap.NO_SUCH_OBJECT
            results = [(base, item_dict)]
        elif scope == ldap.SCOPE_SUBTREE:
            # FIXME - LDAP search with SUBTREE scope must return the base
            # entry, but the code below does _not_. Unfortunately, there are
            # several tests that depend on this broken behavior, and fail
            # when the base entry is returned in the search results. The
            # fix is easy here, just initialize results as above for
            # the SCOPE_BASE case.
            # https://bugs.launchpad.net/keystone/+bug/1368772
            try:
                item_dict = self.db[self.key(base)]
            except KeyError:
                LOG.debug('search fail: dn not found for SCOPE_SUBTREE')
                raise ldap.NO_SUCH_OBJECT
            results = [(base, item_dict)]
            # All descendants: every key ending with ",<base>".
            extraresults = [(k[len(self.__prefix):], v)
                            for k, v in self.db.items()
                            if re.match('%s.*,%s' %
                                        (re.escape(self.__prefix),
                                         re.escape(self.dn(base))), k)]
            results.extend(extraresults)
        elif scope == ldap.SCOPE_ONELEVEL:
            def get_entries():
                # Direct children only: exactly one RDN deeper than base
                # and with base as the DN suffix.
                base_dn = ldap.dn.str2dn(common.utf8_encode(base))
                base_len = len(base_dn)
                for k, v in self.db.items():
                    if not k.startswith(self.__prefix):
                        continue
                    k_dn_str = k[len(self.__prefix):]
                    k_dn = ldap.dn.str2dn(common.utf8_encode(k_dn_str))
                    if len(k_dn) != base_len + 1:
                        continue
                    if k_dn[-base_len:] != base_dn:
                        continue
                    yield (k_dn_str, v)
            results = list(get_entries())
        else:
            # openldap client/server raises PROTOCOL_ERROR for unexpected scope
            raise ldap.PROTOCOL_ERROR
        objects = []
        for dn, attrs in results:
            # filter the objects by filterstr
            # The first RDN is matchable like a regular attribute.
            id_attr, id_val, _ = ldap.dn.str2dn(common.utf8_encode(dn))[0][0]
            id_attr = common.utf8_decode(id_attr)
            id_val = common.utf8_decode(id_val)
            match_attrs = attrs.copy()
            match_attrs[id_attr] = [id_val]
            attrs_checked = set()
            if not filterstr or _match_query(common.utf8_decode(filterstr),
                                            match_attrs,
                                            attrs_checked):
                if (filterstr and
                        (scope != ldap.SCOPE_BASE) and
                        ('objectclass' not in attrs_checked)):
                    raise AssertionError('No objectClass in search filter')
                # filter the attributes by attrlist
                attrs = {k: v for k, v in attrs.items()
                         if not attrlist or k in common.utf8_decode(attrlist)}
                objects.append((dn, attrs))
        return objects
    def set_option(self, option, invalue):
        # Record an LDAP option value; retrievable later via get_option().
        self._ldap_options[option] = invalue
def get_option(self, option):
value = self._ldap_options.get(option)
return value
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if clientctrls is not None or timeout != -1 or sizelimit != 0:
raise exception.NotImplemented()
# only passing a single server control is supported by this fake ldap
if len(serverctrls) > 1:
raise exception.NotImplemented()
# search_ext is async and returns an identifier used for
# retrieving the results via result3(). This will be emulated by
# storing the request in a variable with random integer key and
# performing the real lookup in result3()
msgid = random.randint(0, 1000)
PendingRequests[msgid] = (base, scope, filterstr, attrlist, attrsonly,
serverctrls)
return msgid
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
"""Execute async request.
Only msgid param is supported. Request info is fetched from global
variable `PendingRequests` by msgid, executed using search_s and
limited if requested.
"""
if all != 1 or timeout is not None or resp_ctrl_classes is not None:
raise exception.NotImplemented()
params = PendingRequests[msgid]
# search_s accepts a subset of parameters of search_ext,
# that's why we use only the first 5.
results = self.search_s(*params[:5])
# extract limit from serverctrl
serverctrls = params[5]
ctrl = serverctrls[0]
if ctrl.size:
rdata = results[:ctrl.size]
else:
rdata = results
# real result3 returns various service info -- rtype, rmsgid,
# serverctrls. Now this info is not used, so all this info is None
rtype = None
rmsgid = None
serverctrls = None
return (rtype, rdata, rmsgid, serverctrls)
class FakeLdapPool(FakeLdap):
    """Emulate the python-ldap API with pooled connections.

    This class is used as connector class in PooledLDAPHandler.
    """
    def __init__(self, uri, retry_max=None, retry_delay=None, conn=None):
        # retry_max/retry_delay are accepted for API parity with the real
        # pooled connector but are not used by this fake.
        super(FakeLdapPool, self).__init__(conn=conn)
        self.url = uri
        self.connected = None
        # The pool expects the connector to expose its own connection.
        self.conn = self
        self._connection_time = 5 # any number greater than 0
    def get_lifetime(self):
        # Connection age, in seconds, as queried by the pool.
        return self._connection_time
    def simple_bind_s(self, who=None, cred=None,
                      serverctrls=None, clientctrls=None):
        # Choose the backing store from the fake pool URL, then bind with
        # default admin credentials when none are supplied.
        if self.url.startswith('fakepool://memory'):
            if self.url not in FakeShelves:
                FakeShelves[self.url] = FakeShelve()
            self.db = FakeShelves[self.url]
        else:
            # Strip the 11-char 'fakepool://' prefix to get a shelve path.
            self.db = shelve.open(self.url[11:])
        if not who:
            who = 'cn=Admin'
        if not cred:
            cred = 'password'
        super(FakeLdapPool, self).simple_bind_s(who=who, cred=cred,
                                                serverctrls=serverctrls,
                                                clientctrls=clientctrls)
    def unbind_ext_s(self):
        """Added to extend FakeLdap as connector class."""
        pass
class FakeLdapNoSubtreeDelete(FakeLdap):
    """FakeLdap subclass that does not support subtree delete.

    Same as FakeLdap except delete will throw the LDAP error
    ldap.NOT_ALLOWED_ON_NONLEAF if there is an attempt to delete
    an entry that has children.
    """
    def delete_ext_s(self, dn, serverctrls, clientctrls=None):
        """Remove the ldap object at specified dn."""
        if server_fail:
            # Simulate a server outage when the module-level flag is set.
            raise ldap.SERVER_DOWN
        try:
            # Refuse to delete non-leaf entries, mirroring servers that
            # lack the subtree-delete control.
            children = self._getChildren(dn)
            if children:
                raise ldap.NOT_ALLOWED_ON_NONLEAF
        except KeyError:
            LOG.debug('delete item failed: dn=%s not found.',
                      common.utf8_decode(dn))
            raise ldap.NO_SUCH_OBJECT
        super(FakeLdapNoSubtreeDelete, self).delete_ext_s(dn,
                                                          serverctrls,
                                                          clientctrls)
| apache-2.0 |
Arcanemagus/plexpy | lib/dns/rdtypes/IN/IPSECKEY.py | 8 | 5682 | # Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import base64
import dns.exception
import dns.inet
import dns.name
class IPSECKEY(dns.rdata.Rdata):
    """IPSECKEY record

    @ivar precedence: the precedence for this key data
    @type precedence: int
    @ivar gateway_type: the gateway type
    @type gateway_type: int
    @ivar algorithm: the algorithm to use
    @type algorithm: int
    @ivar gateway: the gateway to reach the entity named by this record
    @type gateway: None, IPv4 address, IPV6 address, or domain name
    @ivar key: the public key
    @type key: string
    @see: RFC 4025"""
    __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
    def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
                 gateway, key):
        super(IPSECKEY, self).__init__(rdclass, rdtype)
        if gateway_type == 0:
            # No gateway: only '.' or an absent value is legal in text form.
            if gateway != '.' and gateway is not None:
                raise SyntaxError('invalid gateway for gateway type 0')
            gateway = None
        elif gateway_type == 1:
            # check that it's a well-formed IPv4 address
            dns.inet.inet_pton(dns.inet.AF_INET, gateway)
        elif gateway_type == 2:
            # check that it's a well-formed IPv6 address
            dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
        elif gateway_type == 3:
            # gateway is a domain name; no extra validation here
            pass
        else:
            raise SyntaxError(
                'invalid IPSECKEY gateway type: %d' % gateway_type)
        self.precedence = precedence
        self.gateway_type = gateway_type
        self.algorithm = algorithm
        self.gateway = gateway
        self.key = key
    def to_text(self, origin=None, relativize=True, **kw):
        # Render the rdata in zone-file form.
        if self.gateway_type == 0:
            gateway = '.'
        elif self.gateway_type == 1:
            gateway = self.gateway
        elif self.gateway_type == 2:
            gateway = self.gateway
        elif self.gateway_type == 3:
            gateway = str(self.gateway.choose_relativity(origin, relativize))
        else:
            raise ValueError('invalid gateway type')
        return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
                                   self.algorithm, gateway,
                                   dns.rdata._base64ify(self.key))
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        # Parse the rdata from zone-file tokens.
        precedence = tok.get_uint8()
        gateway_type = tok.get_uint8()
        algorithm = tok.get_uint8()
        if gateway_type == 3:
            gateway = tok.get_name().choose_relativity(origin, relativize)
        else:
            gateway = tok.get_string()
        # The remaining identifier tokens form the base64-encoded key.
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value.encode())
        b64 = b''.join(chunks)
        key = base64.b64decode(b64)
        return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
                   gateway, key)
    def to_wire(self, file, compress=None, origin=None):
        # Write the rdata in wire format to *file*.
        header = struct.pack("!BBB", self.precedence, self.gateway_type,
                             self.algorithm)
        file.write(header)
        if self.gateway_type == 0:
            # type 0: no gateway bytes at all
            pass
        elif self.gateway_type == 1:
            file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
        elif self.gateway_type == 2:
            file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
        elif self.gateway_type == 3:
            # The gateway name is written without compression (compress=None).
            self.gateway.to_wire(file, None, origin)
        else:
            raise ValueError('invalid gateway type')
        file.write(self.key)
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # Parse the rdata from wire format; the 3-byte header is
        # precedence, gateway type, algorithm.
        if rdlen < 3:
            raise dns.exception.FormError
        header = struct.unpack('!BBB', wire[current: current + 3])
        gateway_type = header[1]
        current += 3
        rdlen -= 3
        if gateway_type == 0:
            gateway = None
        elif gateway_type == 1:
            gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
                                         wire[current: current + 4])
            current += 4
            rdlen -= 4
        elif gateway_type == 2:
            gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
                                         wire[current: current + 16])
            current += 16
            rdlen -= 16
        elif gateway_type == 3:
            (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
                                                  current)
            current += cused
            rdlen -= cused
        else:
            raise dns.exception.FormError('invalid IPSECKEY gateway type')
        # Whatever remains after the gateway is the key; unwrap() converts
        # the wire-data slice back to plain bytes.
        key = wire[current: current + rdlen].unwrap()
        return cls(rdclass, rdtype, header[0], gateway_type, header[2],
                   gateway, key)
| gpl-3.0 |
scollis/iris | lib/iris/tests/test_nimrod.py | 1 | 3683 | # (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import iris
import iris.fileformats.nimrod_load_rules as nimrod_load_rules
def mock_nimrod_field():
    """Build a bare NimrodField carrying the standard missing-data markers."""
    nimrod_field = iris.fileformats.nimrod.NimrodField()
    # Missing-data indicators used by the tests below.
    nimrod_field.int_mdi = -32767
    nimrod_field.float32_mdi = -32767.0
    return nimrod_field
class TestLoad(tests.IrisTest):
    """Checks for NIMROD file loading and the individual load rules."""
    @tests.skip_data
    def test_multi_field_load(self):
        # load a cube with two fields
        cube = iris.load(tests.get_data_path(
            ('NIMROD', 'uk2km', 'WO0000000003452',
             '201007020900_u1096_ng_ey00_visibility0180_screen_2km')))
        self.assertCML(cube, ("nimrod", "load_2flds.cml"))
    def test_orography(self):
        # Mock an orography field we've seen.
        field = mock_nimrod_field()
        cube = iris.cube.Cube(np.arange(100).reshape(10, 10))
        # All date/time components set to missing (int_mdi).
        field.dt_year = field.dt_month = field.dt_day = field.int_mdi
        field.dt_hour = field.dt_minute = field.int_mdi
        field.proj_biaxial_ellipsoid = 0
        field.tm_meridian_scaling = 0.999601
        field.field_code = 73
        field.vertical_coord_type = 1
        field.title = "(MOCK) 2km mean orography"
        field.units = "metres"
        field.source = "GLOBE DTM"
        # Apply each load rule in turn, then compare against reference CML.
        nimrod_load_rules.name(cube, field)
        nimrod_load_rules.units(cube, field)
        nimrod_load_rules.reference_time(cube, field)
        nimrod_load_rules.proj_biaxial_ellipsoid(cube, field)
        nimrod_load_rules.tm_meridian_scaling(cube, field)
        nimrod_load_rules.vertical_coord(cube, field)
        nimrod_load_rules.attributes(cube, field)
        self.assertCML(cube, ("nimrod", "mockography.cml"))
    def test_levels_below_ground(self):
        # Mock a soil temperature field we've seen.
        field = mock_nimrod_field()
        cube = iris.cube.Cube(np.arange(100).reshape(10, 10))
        field.field_code = -1 # Not orography
        field.reference_vertical_coord_type = field.int_mdi # Not bounded
        field.vertical_coord_type = 12
        field.vertical_coord = 42
        nimrod_load_rules.vertical_coord(cube, field)
        self.assertCML(cube, ("nimrod", "levels_below_ground.cml"))
    def test_period_of_interest(self):
        # mock a pressure field
        field = mock_nimrod_field()
        cube = iris.cube.Cube(np.arange(100).reshape(10, 10))
        field.field_code = 0
        # Validity time and data time one hour apart.
        field.vt_year = 2013
        field.vt_month = 5
        field.vt_day = 7
        field.vt_hour = 6
        field.vt_minute = 0
        field.vt_second = 0
        field.dt_year = 2013
        field.dt_month = 5
        field.dt_day = 7
        field.dt_hour = 6
        field.dt_minute = 0
        field.dt_second = 0
        field.period_minutes = 60
        nimrod_load_rules.time(cube, field)
        self.assertCML(cube, ("nimrod", "period_of_interest.cml"))
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
    tests.main()
| gpl-3.0 |
ShinySide/SM-A700F | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict whose missing keys transparently become nested dicts."""
    return defaultdict(autodict)


# Registries filled in by the define_* callbacks perf emits while parsing
# an event's format: per event/field flag and symbolic value names.
flag_fields = autodict()
symbolic_fields = autodict()


def define_flag_field(event_name, field_name, delim):
    """Record the delimiter used between rendered flag names."""
    flag_fields[event_name][field_name]['delim'] = delim


def define_flag_value(event_name, field_name, value, field_str):
    """Register the printable name for a single flag bit."""
    flag_fields[event_name][field_name]['values'][value] = field_str


def define_symbolic_field(event_name, field_name):
    """Symbolic fields need no per-field setup; values carry everything."""
    pass


def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the printable name for a single symbolic value."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str


def flag_str(event_name, field_name, value):
    """Render a flag field's numeric value as delimiter-joined names.

    Bits are consumed in ascending key order; residual bits with no
    registered name are silently ignored, as before.
    """
    string = ""
    field = flag_fields[event_name][field_name]
    if field:
        print_delim = 0
        # dict.keys() has no .sort() on Python 3; sorted() works on both
        # Python 2 and 3 and preserves the original ascending order.
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                string += field['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and field['delim']:
                    string += " " + field['delim'] + " "
                string += field['values'][idx]
                print_delim = 1
                value &= ~idx
    return string


def symbol_str(event_name, field_name, value):
    """Map a symbolic field's numeric value to its registered name."""
    string = ""
    field = symbolic_fields[event_name][field_name]
    if field:
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                string = field['values'][idx]
                break
            if value == idx:
                string = field['values'][idx]
                break
    return string
# Bit names for the common_flags field of a trace event.
trace_flags = {0x00: "NONE",
               0x01: "IRQS_OFF",
               0x02: "IRQS_NOSUPPORT",
               0x04: "NEED_RESCHED",
               0x08: "HARDIRQ",
               0x10: "SOFTIRQ"}


def trace_flag_str(value):
    """Decode a common_flags bitmask into a ' | ' separated name list."""
    names = []
    for bit in trace_flags:
        if not value and not bit:
            # A zero mask renders as the explicit NONE entry.
            names.append("NONE")
            break
        if bit and (value & bit) == bit:
            names.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state code to its single-letter name."""
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Timestamp rendered as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
thaim/ansible | lib/ansible/modules/network/fortios/fortios_log_fortianalyzer2_filter.py | 14 | 14272 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortianalyzer2_filter
short_description: Filters for FortiAnalyzer in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_fortianalyzer2 feature and filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_fortianalyzer2_filter:
description:
- Filters for FortiAnalyzer.
default: null
type: dict
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
dlp_archive:
description:
- Enable/disable DLP archive logging.
type: str
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
type: str
choices:
- enable
- disable
filter:
description:
- FortiAnalyzer 2 log filter.
type: str
filter_type:
description:
- Include/exclude logs that match the filter.
type: str
choices:
- include
- exclude
forward_traffic:
description:
- Enable/disable forward traffic logging.
type: str
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
type: str
choices:
- enable
- disable
local_traffic:
description:
- Enable/disable local in or out traffic logging.
type: str
choices:
- enable
- disable
multicast_traffic:
description:
- Enable/disable multicast traffic logging.
type: str
choices:
- enable
- disable
netscan_discovery:
description:
- Enable/disable netscan discovery event logging.
type: str
netscan_vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
type: str
severity:
description:
- Log every message above and including this severity level.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer_traffic:
description:
- Enable/disable sniffer traffic logging.
type: str
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
type: str
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Filters for FortiAnalyzer.
fortios_log_fortianalyzer2_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortianalyzer2_filter:
anomaly: "enable"
dlp_archive: "enable"
dns: "enable"
filter: "<your_own_value>"
filter_type: "include"
forward_traffic: "enable"
gtp: "enable"
local_traffic: "enable"
multicast_traffic: "enable"
netscan_discovery: "<your_own_value>"
netscan_vulnerability: "<your_own_value>"
severity: "emergency"
sniffer_traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a legacy-mode (fortiosapi) session from the module parameters."""
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    # HTTPS is the default; only an explicitly falsy 'https' turns it off.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')
    fos.login(host, username, password, verify=ssl_verify)
def filter_log_fortianalyzer2_filter_data(json):
    """Keep only the module options that are known and explicitly set."""
    option_list = ['anomaly', 'dlp_archive', 'dns',
                   'filter', 'filter_type', 'forward_traffic',
                   'gtp', 'local_traffic', 'multicast_traffic',
                   'netscan_discovery', 'netscan_vulnerability', 'severity',
                   'sniffer_traffic', 'ssh', 'voip']
    # None means "option not provided" and must not reach the API payload.
    return {option: json[option] for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from foo_bar to foo-bar (FortiOS form).

    The previous list branch rebound the loop variable
    (``elem = underscore_to_hyphen(elem)``) and discarded the converted
    value, so dicts nested inside lists were never converted. Store the
    result back by index instead.
    """
    if isinstance(data, list):
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def log_fortianalyzer2_filter(data, fos):
    """Push the filter configuration to the log.fortianalyzer2/filter path."""
    vdom = data['vdom']
    raw_options = data['log_fortianalyzer2_filter']
    # Drop unset options, then convert key style to the FortiOS API form.
    payload = underscore_to_hyphen(
        filter_log_fortianalyzer2_filter_data(raw_options))
    return fos.set('log.fortianalyzer2',
                   'filter',
                   data=payload,
                   vdom=vdom)
def is_successful_status(status):
    """A call succeeded if FortiOS says so, or a DELETE hit a missing object."""
    if status['status'] == "success":
        return True
    # Deleting something that is already gone counts as success.
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_fortianalyzer2(data, fos):
    """Apply the requested log.fortianalyzer2 configuration.

    Returns a (is_error, has_changed, result) tuple.

    The original fell through to ``return ... resp ...`` even when no
    'log_fortianalyzer2_filter' payload was supplied, crashing with an
    unrelated NameError; fail with a clear message instead.
    """
    if data['log_fortianalyzer2_filter']:
        resp = log_fortianalyzer2_filter(data, fos)
        return not is_successful_status(resp), \
            resp['status'] == "success", \
            resp
    raise ValueError('log_fortianalyzer2_filter option is missing or empty')
def main():
    """Module entry point: declare the argument spec and apply the config.

    Two connection modes are supported: HTTPAPI (via the persistent
    connection socket) and legacy fortiosapi (host/username/password).
    """
    # Argument spec mirroring the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "log_fortianalyzer2_filter": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "anomaly": {"required": False, "type": "str",
                            "choices": ["enable", "disable"]},
                "dlp_archive": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "dns": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "filter": {"required": False, "type": "str"},
                "filter_type": {"required": False, "type": "str",
                                "choices": ["include", "exclude"]},
                "forward_traffic": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "gtp": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "local_traffic": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "multicast_traffic": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "netscan_discovery": {"required": False, "type": "str"},
                "netscan_vulnerability": {"required": False, "type": "str"},
                "severity": {"required": False, "type": "str",
                             "choices": ["emergency", "alert", "critical",
                                         "error", "warning", "notification",
                                         "information", "debug"]},
                "sniffer_traffic": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "ssh": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "voip": {"required": False, "type": "str",
                         "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI mode: requires the persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_log_fortianalyzer2(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy mode: direct fortiosapi session, logged out when done.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_log_fortianalyzer2(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| mit |
kiniou/qtile | libqtile/widget/prompt.py | 2 | 14516 | # Copyright (c) 2010-2011 Aldo Cortesi
# Copyright (c) 2010 Philip Kranz
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2011-2012 roger
# Copyright (c) 2011-2012, 2014 Tycho Andersen
# Copyright (c) 2012 Dustin Lacewell
# Copyright (c) 2012 Laurie Clark-Michalek
# Copyright (c) 2012-2014 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import glob
import os
import string
from .. import bar, xkeysyms, xcbq, command
from . import base
class NullCompleter:
    """Completer that performs no completion at all."""

    def __init__(self, qtile):
        self.qtile = qtile
        self.thisfinal = ""

    def actual(self):
        """Return the value the prompt should finally use."""
        return self.thisfinal

    def reset(self):
        """Nothing to clear for the null completer."""
        pass

    def complete(self, txt):
        """Echo the input back unchanged."""
        return txt
class FileCompleter:
    """Tab-completion over filesystem paths, cycling through matches."""
    def __init__(self, qtile, _testing=False):
        # _testing is stored for interface parity with CommandCompleter;
        # it is not otherwise used here.
        self._testing = _testing
        self.qtile = qtile
        self.thisfinal = None
        self.reset()
    def actual(self):
        # The concrete path selected by the most recent complete() call.
        return self.thisfinal
    def reset(self):
        # Invalidate the cached candidate list; rebuilt on next complete().
        self.lookup = None
    def complete(self, txt):
        """
        Returns the next completion for txt, or None if there is no completion.
        """
        if not self.lookup:
            self.lookup = []
            # Paths are interpreted relative to $HOME unless absolute.
            if txt == "" or txt[0] not in "~/":
                txt = "~/" + txt
            path = os.path.expanduser(txt)
            if os.path.isdir(path):
                files = glob.glob(os.path.join(path, "*"))
                prefix = txt
            else:
                files = glob.glob(path + "*")
                prefix = os.path.dirname(txt)
                prefix = prefix.rstrip("/") or "/"
            for f in files:
                display = os.path.join(prefix, os.path.basename(f))
                if os.path.isdir(f):
                    # Mark directories with a trailing slash in the display.
                    display += "/"
                self.lookup.append((display, f))
            self.lookup.sort()
            self.offset = -1
            # The raw input is the last entry, so cycling wraps back to it.
            self.lookup.append((txt, txt))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = ret[1]
        return ret[0]
class QshCompleter:
    """Tab-completion for qsh object paths and commands via CommandRoot."""
    def __init__(self, qtile):
        self.qtile = qtile
        self.client = command.CommandRoot(self.qtile)
        self.thisfinal = None
        self.reset()
    def actual(self):
        # The fully-qualified path chosen by the last complete() call.
        return self.thisfinal
    def reset(self):
        # Invalidate cached candidates and the current dotted prefix.
        self.lookup = None
        self.path = ''
        self.offset = -1
    def complete(self, txt):
        txt = txt.lower()
        if not self.lookup:
            self.lookup = []
            # Split the typed text into a dotted object path plus the
            # final partial term being completed.
            path = txt.split('.')[:-1]
            self.path = '.'.join(path)
            term = txt.split('.')[-1]
            if len(self.path) > 0:
                self.path += '.'
            # NOTE(review): user-typed text is interpolated into eval()
            # here. Input comes from the local prompt widget, but this
            # would be injectable if ever fed untrusted text.
            contains_cmd = 'self.client.%s_contains' % self.path
            try:
                contains = eval(contains_cmd)
            except AttributeError:
                contains = []
            for obj in contains:
                if obj.lower().startswith(term):
                    self.lookup.append((obj, obj))
            commands_cmd = 'self.client.%scommands()' % self.path
            try:
                commands = eval(commands_cmd)
            except (command.CommandError, AttributeError):
                commands = []
            for cmd in commands:
                if cmd.lower().startswith(term):
                    self.lookup.append((cmd + '()', cmd + '()'))
            self.offset = -1
            # The raw term is the last entry, so cycling wraps back to it.
            self.lookup.append((term, term))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = self.path + ret[0]
        return self.path + ret[0]
class GroupCompleter:
    """Tab-complete group names known to the running qtile instance."""

    def __init__(self, qtile):
        self.qtile = qtile
        self.thisfinal = None
        self.lookup = None
        self.offset = None

    def actual(self):
        """Return the value selected by the last complete() call."""
        return self.thisfinal

    def reset(self):
        """Forget the candidate list so the next complete() rebuilds it."""
        self.lookup = None
        self.offset = -1

    def complete(self, txt):
        """Return the next group-name completion for txt, cycling on repeat."""
        txt = txt.lower()
        if not self.lookup:
            # Candidates are (display, actual) pairs; matching is
            # case-insensitive but the original group name is kept.
            matches = sorted(
                (group, group)
                for group in self.qtile.groupMap.keys()
                if group.lower().startswith(txt)
            )
            # The raw input is the final cycle entry so the user can get
            # their typed text back.
            matches.append((txt, txt))
            self.lookup = matches
            self.offset = -1
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        display, chosen = self.lookup[self.offset]
        self.thisfinal = chosen
        return display
class WindowCompleter:
    """Tab-complete client window names, resolving to the window id."""

    def __init__(self, qtile):
        self.qtile = qtile
        self.thisfinal = None
        self.lookup = None
        self.offset = None

    def actual(self):
        """Return the window id (or raw text) chosen by the last complete()."""
        return self.thisfinal

    def reset(self):
        """Drop cached candidates; the next complete() rescans windows."""
        self.lookup = None
        self.offset = -1

    def complete(self, txt):
        """Return the next window-name completion for txt, cycling on repeat.

        NOTE(review): unlike GroupCompleter, txt is not lower-cased here,
        so matching is case-sensitive on the input side — presumably
        intentional, but worth confirming.
        """
        if not self.lookup:
            # Only windows attached to a group are offered as candidates.
            candidates = [
                (window.name, wid)
                for wid, window in self.qtile.windowMap.items()
                if window.group and window.name.lower().startswith(txt)
            ]
            candidates.sort()
            # The raw input is the final cycle entry.
            candidates.append((txt, txt))
            self.lookup = candidates
            self.offset = -1
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        display, chosen = self.lookup[self.offset]
        self.thisfinal = chosen
        return display
class CommandCompleter:
    """Tab-complete executable names from $PATH or an explicit path."""
    # Fallback search path when $PATH is unset.
    DEFAULTPATH = "/bin:/usr/bin:/usr/local/bin"
    def __init__(self, qtile, _testing=False):
        """
        _testing: disables reloading of the lookup table
        to make testing possible.
        """
        self.lookup = None
        self.offset = None
        self.thisfinal = None
        self._testing = _testing
    def actual(self):
        """
        Returns the current actual value.
        """
        return self.thisfinal
    def executable(self, fpath):
        # True when fpath is executable by the current user.
        return os.access(fpath, os.X_OK)
    def reset(self):
        # Invalidate the cached candidates; rebuilt on next complete().
        self.lookup = None
        self.offset = -1
    def complete(self, txt):
        """
        Returns the next completion for txt, or None if there is no completion.
        """
        if not self.lookup:
            if not self._testing:
                # Lookup is a set of (display value, actual value) tuples.
                self.lookup = []
                if txt and txt[0] in "~/":
                    # Explicit path: complete against the filesystem.
                    path = os.path.expanduser(txt)
                    if os.path.isdir(path):
                        files = glob.glob(os.path.join(path, "*"))
                        prefix = txt
                    else:
                        files = glob.glob(path + "*")
                        prefix = os.path.dirname(txt)
                        prefix = prefix.rstrip("/") or "/"
                    for f in files:
                        if self.executable(f):
                            display = os.path.join(prefix, os.path.basename(f))
                            if os.path.isdir(f):
                                display += "/"
                            self.lookup.append((display, f))
                else:
                    # Bare name: search each directory on $PATH.
                    dirs = os.environ.get("PATH", self.DEFAULTPATH).split(":")
                    for didx, d in enumerate(dirs):
                        try:
                            for cmd in glob.glob(os.path.join(d, "%s*" % txt)):
                                if self.executable(cmd):
                                    self.lookup.append(
                                        (
                                            os.path.basename(cmd),
                                            cmd
                                        ),
                                    )
                        except OSError:
                            pass
            self.lookup.sort()
            self.offset = -1
            # The raw input is the last entry, so cycling wraps back to it.
            self.lookup.append((txt, txt))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = ret[1]
        return ret[0]
class Prompt(base._TextBox):
    """
    A widget that prompts for user input. Input should be started using the
    .startInput method on this class.
    """
    # Maps the ``complete`` argument of startInput to a completer class.
    completers = {
        "file": FileCompleter,
        "qsh": QshCompleter,
        "cmd": CommandCompleter,
        "group": GroupCompleter,
        "window": WindowCompleter,
        None: NullCompleter
    }
    defaults = [("cursorblink", 0.5, "Cursor blink rate. 0 to disable."),
                ("prompt", "{prompt}: ", "Text displayed at the prompt")]
    def __init__(self, name="prompt", **config):
        base._TextBox.__init__(self, "", bar.CALCULATED, **config)
        self.add_defaults(Prompt.defaults)
        self.name = name
        self.active = False    # True while the prompt is accepting input
        self.blink = False     # current cursor blink phase
        self.completer = None
    def _configure(self, qtile, bar):
        base._TextBox._configure(self, qtile, bar)
    def startInput(self, prompt, callback,
                   complete=None, strict_completer=False):
        """
        Displays a prompt and starts to take one line of keyboard input
        from the user. When done, calls the callback with the input string
        as argument.

        prompt = text displayed at the prompt, e.g. "spawn: "
        callback = function to call with the returned value.
        complete = Tab-completion; one of the keys of Prompt.completers
            (None, "cmd", "file", "qsh", "group", "window").
        strict_completer = when True the return value will be the exact
            completer result where available.
        """
        # Only arm the blink timer once per input session.
        if self.cursorblink and not self.active:
            self.timeout_add(self.cursorblink, self._blink)
        self.display = self.prompt.format(prompt=prompt)
        self.active = True
        self.userInput = ""
        self.callback = callback
        self.completer = self.completers[complete](self.qtile)
        self.strict_completer = strict_completer
        self._update()
        self.bar.widget_grab_keyboard(self)
    def _calculate_real_width(self):
        # Width including the cursor glyph: when the cursor is currently
        # hidden, temporarily append it so the width does not jitter while
        # blinking.
        if self.blink:
            return min(
                self.layout.width,
                self.bar.width
            ) + self.actual_padding * 2
        else:
            _text = self.text
            self.text = _text + "_"
            width = min(
                self.layout.width,
                self.bar.width
            ) + self.actual_padding * 2
            self.text = _text
            return width
    def calculate_width(self):
        # The widget collapses to zero width while it has no text to show.
        if self.text:
            return self._calculate_real_width()
        else:
            return 0
    def _blink(self):
        # Toggle the cursor phase and re-arm the timer while input is active.
        self.blink = not self.blink
        self._update()
        if self.active:
            self.timeout_add(self.cursorblink, self._blink)
    def _update(self):
        # Rebuild the displayed text (prompt + current input + blink cursor)
        # and redraw the bar.
        if self.active:
            self.text = "%s%s" % (self.display, self.userInput)
            if self.blink:
                self.text = self.text + "_"
            else:
                self.text = self.text
        else:
            self.text = ""
        self.bar.draw()
    def handle_KeyPress(self, e):
        """
        KeyPress handler for the minibuffer.
        Currently only supports ASCII characters.
        """
        # Mask out num-lock so it does not change the keysym lookup.
        state = e.state & ~(self.qtile.numlockMask)
        keysym = self.qtile.conn.keycode_to_keysym(e.detail, state)
        if keysym == xkeysyms.keysyms['Tab']:
            self.userInput = self.completer.complete(self.userInput)
        else:
            # Any non-Tab key ends the current completion cycle; remember
            # the completer's last actual value before resetting it.
            actual_value = self.completer.actual()
            self.completer.reset()
            if keysym < 127 and chr(keysym) in string.printable:
                # No LookupString in XCB... oh,
                # the shame! Unicode users beware!
                self.userInput += chr(keysym)
            elif (keysym == xkeysyms.keysyms['BackSpace'] and
                  len(self.userInput) > 0):
                self.userInput = self.userInput[:-1]
            elif keysym == xkeysyms.keysyms['Escape']:
                self.active = False
                self.bar.widget_ungrab_keyboard()
            elif keysym == xkeysyms.keysyms['Return']:
                self.active = False
                self.bar.widget_ungrab_keyboard()
                if self.strict_completer:
                    # Prefer the completer's exact match over the typed text.
                    self.callback(actual_value or self.userInput)
                else:
                    self.callback(self.userInput)
        self._update()
    def cmd_fake_keypress(self, key):
        # Synthesize a KeyPress event for *key* and feed it to the handler
        # (used for scripting/testing).
        class Dummy:
            pass
        d = Dummy()
        keysym = xcbq.keysyms[key]
        d.detail = self.qtile.conn.keysym_to_keycode(keysym)
        d.state = 0
        self.handle_KeyPress(d)
    def cmd_info(self):
        """
        Returns a dictionary of info for this object.
        """
        return dict(
            name=self.name,
            width=self.width,
            text=self.text,
            active=self.active,
        )
| mit |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py | 9 | 3597 | # pylint: disable=missing-docstring
import re
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Ansible lookup that builds the list of default OpenShift scheduler
    priorities for a given deployment type and short version."""

    # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
    def run(self, terms, variables=None, zones_enabled=True, short_version=None,
            deployment_type=None, **kwargs):
        """Return the list of scheduler priority dicts.

        terms/variables: standard Ansible lookup arguments; *variables* must
            carry openshift_facts output unless both *short_version* and
            *deployment_type* are passed explicitly.
        zones_enabled: when True, append the zone service-anti-affinity
            priority.
        Raises AnsibleError for missing facts or unknown versions/types.
        """
        # Priorities shared by every supported version.
        priorities = [
            {'name': 'LeastRequestedPriority', 'weight': 1},
            {'name': 'BalancedResourceAllocation', 'weight': 1},
            {'name': 'SelectorSpreadPriority', 'weight': 1}
        ]
        if short_version is None or deployment_type is None:
            if 'openshift' not in variables:
                raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
        if deployment_type is None:
            if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
                raise AnsibleError("This lookup module requires that the deployment_type be set")
            deployment_type = variables['openshift']['common']['deployment_type']
        if short_version is None:
            if 'short_version' in variables['openshift']['common']:
                short_version = variables['openshift']['common']['short_version']
            elif 'openshift_release' in variables:
                release = variables['openshift_release']
                # Accept either "v3.4" or "3.4"-style release strings.
                if release.startswith('v'):
                    short_version = release[1:]
                else:
                    short_version = release
                short_version = '.'.join(short_version.split('.')[0:2])
            elif 'openshift_version' in variables:
                version = variables['openshift_version']
                short_version = '.'.join(version.split('.')[0:2])
            else:
                # pylint: disable=line-too-long
                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
        if deployment_type == 'origin':
            if short_version not in ['1.1', '1.2', '1.3', '1.4']:
                raise AnsibleError("Unknown short_version %s" % short_version)
        elif deployment_type == 'openshift-enterprise':
            if short_version not in ['3.1', '3.2', '3.3', '3.4']:
                raise AnsibleError("Unknown short_version %s" % short_version)
        else:
            raise AnsibleError("Unknown deployment_type %s" % deployment_type)
        if deployment_type == 'openshift-enterprise':
            # Convert the enterprise short_version to the matching origin one.
            # The dot must be escaped: an unescaped '.' would also rewrite
            # strings like '3x1'.
            short_version = re.sub(r'^3\.', '1.', short_version)
        if short_version == '1.4':
            priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
        # only 1.1 didn't include NodeAffinityPriority
        if short_version != '1.1':
            priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
        if short_version not in ['1.1', '1.2']:
            priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
        if short_version not in ['1.1', '1.2', '1.3']:
            priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
        if zones_enabled:
            # Spread service endpoints across zones via anti-affinity.
            zone_priority = {
                'name': 'Zone',
                'argument': {
                    'serviceAntiAffinity': {
                        'label': 'zone'
                    }
                },
                'weight': 2
            }
            priorities.append(zone_priority)
        return priorities
| apache-2.0 |
apbard/scipy | scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py | 11 | 5612 | """ Test functions for the sparse.linalg.eigen.lobpcg module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less, assert_)
from scipy import ones, rand, r_, diag, linalg, eye
from scipy.linalg import eig, eigh, toeplitz
import scipy.sparse
from scipy.sparse.linalg.eigen.lobpcg import lobpcg
def ElasticRod(n):
    """Stiffness/mass matrices (A, B) of a fixed-free elastic rod with *n*
    finite elements."""
    # Material/geometry constants: length, element length, density,
    # cross-section area and Young's modulus.
    total_length = 1.0
    le = total_length / n
    rho = 7.85e3
    S = 1.e-4
    E = 2.1e11
    mass = rho * S * le / 6.
    k = E * S / le
    main_a = r_[2. * ones(n - 1), 1]
    off = ones(n - 1)
    A = k * (diag(main_a) - diag(off, 1) - diag(off, -1))
    B = mass * (diag(r_[4. * ones(n - 1), 2]) + diag(off, 1) + diag(off, -1))
    return A, B
def MikotaPair(n):
    """Mikota test pair (A, B); its generalized eigenvalues are the squared
    integers 1, 4, 9, ..., n**2, which makes it a convenient test system."""
    idx = np.arange(1, n + 1)
    B = diag(1. / idx)
    sub = np.arange(n - 1, 0, -1)
    main = np.arange(2 * n - 1, 0, -2)
    A = diag(main) - diag(sub, -1) - diag(sub, 1)
    return A, B
def compare_solutions(A, B, m):
    """Check that lobpcg's lower half eigenvalues match the dense solver.

    A, B: matrices of the generalized problem A v = w B v.
    m: block size (number of eigenpairs requested from lobpcg).
    """
    n = A.shape[0]
    np.random.seed(0)
    # Orthonormal random starting block.
    X = linalg.orth(rand(n, m))
    eigs, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30)
    eigs.sort()
    ref, _ = eig(A, b=B)
    ref.sort()
    half = int(m / 2)
    assert_almost_equal(ref[:half], eigs[:half], decimal=2)
def test_Small():
    # Both generators should agree with the dense solver on a full-size
    # (m == n == 10) problem.
    for make_system in (ElasticRod, MikotaPair):
        A, B = make_system(10)
        compare_solutions(A, B, 10)
def test_ElasticRod():
    # Larger elastic-rod system, 20-dimensional lobpcg block.
    stiffness, mass = ElasticRod(100)
    compare_solutions(stiffness, mass, 20)
def test_MikotaPair():
    # Larger Mikota system, 20-dimensional lobpcg block.
    stiffness, mass = MikotaPair(100)
    compare_solutions(stiffness, mass, 20)
def test_trivial():
    """Identity matrix with B=None: lobpcg must agree with the dense solver."""
    n = 5
    A = eye(n)
    # NOTE: the unused initial-guess matrix ``X = ones((n, 1))`` that used to
    # be built here was dead code — compare_solutions draws its own random
    # starting block.
    compare_solutions(A, None, n)
def test_regression():
    # Regression test for the issue reported in
    # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
    size = 10
    guess = np.ones((size, 1))
    eigenvalues, _ = lobpcg(np.identity(size), guess)
    assert_allclose(eigenvalues, [1])
def test_diagonal():
    # This test was moved from '__main__' in lobpcg.py.
    # Coincidentally or not, this is the same eigensystem
    # required to reproduce arpack bug
    # http://forge.scilab.org/index.php/p/arpack-ng/issues/1397/
    # even using the same n=100.
    np.random.seed(1234)
    # Problem size, number of wanted eigenpairs, number of excluded
    # basis vectors.
    n, m, m_excluded = 100, 4, 3
    # Generalized problem Av = cBv with A = diag(1..n) and B = I, so the
    # eigenvalues are simply 1..n; M is A's inverse, used as preconditioner.
    entries = np.arange(1, n + 1, dtype=float)
    A = scipy.sparse.diags([entries], [0], (n, n))
    B = scipy.sparse.eye(n)
    M = scipy.sparse.diags([np.reciprocal(entries)], [0], (n, n))
    # Random initial block.
    X = np.random.rand(n, m)
    # Require the result to lie in the orthogonal complement of the first
    # few standard basis vectors, shifting the found spectrum up.
    Y = np.eye(n, m_excluded)
    eigs, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False)
    assert_allclose(eigs, np.arange(1 + m_excluded, 1 + m_excluded + m))
    _check_eigen(A, eigs, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
    """Cross-check analytic, eigh and lobpcg eigendecompositions of a
    path-graph Laplacian, ending with a Fiedler-vector sanity check.

    n: number of graph nodes; p: number of eigenpairs requested from lobpcg.
    """
    # This is not necessarily the recommended way to find the Fiedler vector.
    np.random.seed(1234)
    # toeplitz of the second basis vector gives ones on the sub/super
    # diagonals: the adjacency matrix of a path graph.
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    D = np.diag(A.sum(axis=1))
    L = D - A  # graph Laplacian
    # Compute the full eigendecomposition using tricks, e.g.
    # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
    tmp = np.pi * np.arange(n) / n
    analytic_w = 2 * (1 - np.cos(tmp))
    analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
    _check_eigen(L, analytic_w, analytic_V)
    # Compute the full eigendecomposition using eigh.
    eigh_w, eigh_V = eigh(L)
    _check_eigen(L, eigh_w, eigh_V)
    # Check that the first eigenvalue is near zero and that the rest agree.
    assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
    assert_allclose(eigh_w[1:], analytic_w[1:])
    # Check small lobpcg eigenvalues.
    X = analytic_V[:, :p]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
    assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
    # Check large lobpcg eigenvalues.
    X = analytic_V[:, -p:]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
    # Look for the Fiedler vector using good but not exactly correct guesses.
    fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
    X = np.vstack((np.ones(n), fiedler_guess)).T
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    # Mathematically, the smaller eigenvalue should be zero
    # and the larger should be the algebraic connectivity.
    lobpcg_w = np.sort(lobpcg_w)
    assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
def test_fiedler_small_8():
    """Fiedler check on an 8-node path graph."""
    # This triggers the dense path because 8 < 2*5.
    _check_fiedler(8, 2)
def test_fiedler_large_12():
    """Fiedler check on a 12-node path graph."""
    # This does not trigger the dense path, because 2*5 <= 12.
    _check_fiedler(12, 2)
| bsd-3-clause |
TeamWin/android_kernel_samsung_galaxys2plus-common | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage string shown (via sys.exit) when too many CLI arguments are given.
usage = "perf script -s syscall-counts.py [comm]\n";

# Optional filter: only count syscalls issued by this command name.
for_comm = None

if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]

# Maps syscall id -> invocation count; autodict creates entries on demand.
syscalls = autodict()
def trace_begin():
	# Called once by perf before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf after the last event; print the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Per-event handler invoked by perf for each raw sys_enter tracepoint.
	# NOTE: 'id' shadows the builtin, but the parameter names are fixed by
	# perf's handler calling convention.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First occurrence: the autodict leaf is not an int yet.
		syscalls[id] = 1
def print_syscall_totals():
	# Print a per-syscall event count table, sorted by count (descending).
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
				 "-----------"),
	# Sort by (count, id) so ties have a deterministic order.
	for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
				  reverse = True):
		print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
boto/botocore | tests/functional/test_h2_required.py | 2 | 2083 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import get_session
# Sentinel value marking a service where *every* operation requires HTTP 2
# (compared with `is` in _assert_h2_service_is_known).
_H2_REQUIRED = object()

# Service names to list of known HTTP 2 operations
_KNOWN_SERVICES = {
    'kinesis': ['SubscribeToShard'],
    'lexv2-runtime': ['StartConversation'],
}
def test_all_uses_of_h2_are_known():
    # Generator test: yield one check per h2-flagged service/operation found
    # in the loaded service models.
    session = get_session()
    loader = session.get_component('data_loader')
    for service in loader.list_available_services('service-2'):
        model = session.get_service_model(service)
        h2_config = model.metadata.get('protocolSettings', {}).get('h2')
        if h2_config == 'required':
            yield _assert_h2_service_is_known, service
        elif h2_config == 'eventstream':
            for op_name in model.operation_names:
                if model.operation_model(op_name).has_event_stream_output:
                    yield _assert_h2_operation_is_known, service, op_name
def _assert_h2_service_is_known(service):
    # Validates that a service that requires HTTP 2 for all operations is known
    entry = _KNOWN_SERVICES.get(service)
    assert entry is _H2_REQUIRED, (
        'Found unknown HTTP 2 service: %s' % service)
def _assert_h2_operation_is_known(service, operation):
    # Validates that an operation that requires HTTP 2 is known
    known = _KNOWN_SERVICES.get(service, [])
    assert operation in known, (
        'Found unknown HTTP 2 operation: %s.%s' % (service, operation))
| apache-2.0 |
pwollstadt/IDTxl | test/test_neighbour_search_opencl.py | 2 | 22420 | """Provide unit tests for neighbour searches using OpenCl GPU-code.
Tests are based on unit tests by Pedro Mediano
https://github.com/pmediano/jidt/tree/master/java/source/infodynamics/
measures/continuous/kraskov/cuda
"""
import pytest
import numpy as np
from idtxl.estimators_opencl import OpenCLKraskovMI, OpenCLKraskovCMI
# Skip test module if pyopencl is not installed
pytest.importorskip('pyopencl')

# Shared estimator settings: k=1 nearest neighbour, no Theiler correction,
# no noise; debug/return_counts make estimate() also return distances and
# neighbour counts, which is what these tests inspect.
settings = {'theiler_t': 0,
            'kraskov_k': 1,
            'noise_level': 0,
            'gpu_id': 0,
            'debug': True,
            'return_counts': True,
            'verbose': True}

# Module-level estimator instances reused by (almost) all tests below.
EST_MI = OpenCLKraskovMI(settings)
EST_CMI = OpenCLKraskovCMI(settings)
def test_knn_one_dim():
    """Test kNN search in 1D."""
    n_chunks = 16
    source = np.tile(np.array([[-1], [-1.2], [1], [1.1]]), (n_chunks, 1))
    dummy = np.tile(np.array([[99], [99], [99], [99]]), (n_chunks, 1))
    # Nearest-neighbour distances expected for the four points of a chunk.
    expected = [0.2, 0.2, 0.1, 0.1]
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        source, dummy, n_chunks=n_chunks)
    for i, d in enumerate(expected):
        assert np.isclose(dist1[i], d), 'Distance %d not correct.' % i
    # Call CMI estimator with the dummy set as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        source, dummy, dummy, n_chunks=n_chunks)
    for i, d in enumerate(expected):
        assert np.isclose(dist2[i], d), 'Distance %d not correct.' % i
def test_knn_two_dim():
    """Test kNN search in 2D."""
    n_chunks = 16
    source = np.tile(np.array([
        [-1, -1],
        [0.5, 0.5],
        [1.1, 1.1],
        [2, 2]]), (n_chunks, 1))
    dummy = np.ones(source.shape) * 99
    # Max-norm nearest-neighbour distances for the four points of a chunk.
    expected = [1.5, 0.6, 0.6, 0.9]
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        source, dummy, n_chunks=n_chunks)
    for i, d in enumerate(expected):
        assert np.isclose(dist1[i], d), 'Distances %d not correct.' % i
    # Call CMI estimator with the dummy set as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        source, dummy, dummy, n_chunks=n_chunks)
    for i, d in enumerate(expected):
        assert np.isclose(dist2[i], d), 'Distances %d not correct.' % i
def test_one_dim_longer_sequence():
    """Test kNN search in 1D on a longer sequence.

    The sequence is padded with points very far away (9999) so the
    nearest-neighbour structure of the first four samples is unchanged.
    """
    n_chunks = 4
    pointset1 = np.expand_dims(
        np.array([-1, -1.2, 1, 1.1, 10, 11, 10.5, -100, -50, 666]), axis=1)
    pointset1 = np.vstack((pointset1, np.ones((6, 1))*9999))
    pointset1 = np.tile(pointset1, (n_chunks, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[0], 0.2), 'Distance 0 not correct.'
    assert np.isclose(dist1[1], 0.2), 'Distance 1 not correct.'
    assert np.isclose(dist1[2], 0.1), 'Distance 2 not correct.'
    assert np.isclose(dist1[3], 0.1), 'Distance 3 not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist2[0], 0.2), 'Distance 0 not correct.'
    assert np.isclose(dist2[1], 0.2), 'Distance 1 not correct.'
    assert np.isclose(dist2[2], 0.1), 'Distance 2 not correct.'
    assert np.isclose(dist2[3], 0.1), 'Distance 3 not correct.'
def test_two_dim_longer_sequence():
    """Test kNN with longer sequences.
    Note:
        The expected results differ from the C++ unit tests because we use the
        maximum norm when searching for neighbours.
    """
    # This is the same sequence as in the previous test case, padded with a
    # bunch of points very far away.
    n_chunks = 4
    pointset1 = np.array(
        [[-1, 0.5, 1.1, 2, 10, 11, 10.5, -100, -50, 666],
         [-1, 0.5, 1.1, 2, 98, -9, -200, 45.3, -53, 0.1]])
    pointset1 = np.hstack((pointset1, np.ones((2, 6))*9999)).T.copy()
    pointset1 = np.tile(pointset1, (n_chunks, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[0], 1.5), 'Distances 0 not correct.'
    assert np.isclose(dist1[1], 0.6), 'Distances 1 not correct.'
    assert np.isclose(dist1[2], 0.6), 'Distances 2 not correct.'
    assert np.isclose(dist1[3], 0.9), 'Distances 3 not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist2[0], 1.5), 'Distances 0 not correct.'
    assert np.isclose(dist2[1], 0.6), 'Distances 1 not correct.'
    assert np.isclose(dist2[2], 0.6), 'Distances 2 not correct.'
    assert np.isclose(dist2[3], 0.9), 'Distances 3 not correct.'
def test_random_data():
    """Smoke kNN test with big random dataset."""
    n_points, n_dims = 1000, 5
    data = np.random.randn(n_points, n_dims).astype('float32')
    # Call MI estimator
    mi, dist_mi, npoints_x, npoints_y = EST_MI.estimate(
        data, data, n_chunks=1)
    # Call CMI estimator with the second set as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist_cmi, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        data, data, data, n_chunks=1)
    assert np.all(np.isclose(dist_mi, dist_cmi)), (
        'High- and low-level calls returned different distances.')
def test_two_chunks():
    """Run knn search for two chunks.

    Each chunk holds eight points; the neighbour search must not cross
    chunk boundaries.
    """
    n_chunks = 2 * 8
    pointset1 = np.expand_dims(  # this is data for two chunks
        np.array([5, 6, -5, -7, 50, -50, 60, -70]), axis=1)
    pointset1 = np.tile(pointset1, (n_chunks // 2, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    print(dist1)
    assert np.isclose(dist1[0], 1), 'Distance 0 not correct.'
    assert np.isclose(dist1[1], 1), 'Distance 1 not correct.'
    assert np.isclose(dist1[2], 2), 'Distance 2 not correct.'
    assert np.isclose(dist1[3], 2), 'Distance 3 not correct.'
    assert np.isclose(dist1[4], 10), 'Distance 4 not correct.'
    assert np.isclose(dist1[5], 20), 'Distance 5 not correct.'
    assert np.isclose(dist1[6], 10), 'Distance 6 not correct.'
    assert np.isclose(dist1[7], 20), 'Distance 7 not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks=n_chunks)
    print(dist2)
    assert np.isclose(dist2[0], 1), 'Distance 0 not correct.'
    assert np.isclose(dist2[1], 1), 'Distance 1 not correct.'
    assert np.isclose(dist2[2], 2), 'Distance 2 not correct.'
    assert np.isclose(dist2[3], 2), 'Distance 3 not correct.'
    assert np.isclose(dist2[4], 10), 'Distance 4 not correct.'
    assert np.isclose(dist2[5], 20), 'Distance 5 not correct.'
    assert np.isclose(dist2[6], 10), 'Distance 6 not correct.'
    assert np.isclose(dist2[7], 20), 'Distance 7 not correct.'
def test_three_chunks():
    """Run knn search for three chunks.

    Twelve points per chunk; distances grow by a factor of ten per
    four-point group, so cross-chunk leakage would be obvious.
    """
    n_chunks = 3 * 16
    pointset1 = np.expand_dims(np.array(
        [5, 6, -5, -7, 50, -50, 60, -70, 500, -500, 600, -700]), axis=1)
    pointset1 = np.tile(pointset1, (16, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[0], 1), 'Distance 0 is not correct.'
    assert np.isclose(dist1[1], 1), 'Distance 1 is not correct.'
    assert np.isclose(dist1[2], 2), 'Distance 2 is not correct.'
    assert np.isclose(dist1[3], 2), 'Distance 3 is not correct.'
    assert np.isclose(dist1[4], 10), 'Distance 4 is not correct.'
    assert np.isclose(dist1[5], 20), 'Distance 5 is not correct.'
    assert np.isclose(dist1[6], 10), 'Distance 6 is not correct.'
    assert np.isclose(dist1[7], 20), 'Distance 7 is not correct.'
    assert np.isclose(dist1[8], 100), 'Distance 8 is not correct.'
    assert np.isclose(dist1[9], 200), 'Distance 9 is not correct.'
    assert np.isclose(dist1[10], 100), 'Distance 10 is not correct.'
    assert np.isclose(dist1[11], 200), 'Distance 11 is not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist2[0], 1), 'Distance 0 is not correct.'
    assert np.isclose(dist2[1], 1), 'Distance 1 is not correct.'
    assert np.isclose(dist2[2], 2), 'Distance 2 is not correct.'
    assert np.isclose(dist2[3], 2), 'Distance 3 is not correct.'
    assert np.isclose(dist2[4], 10), 'Distance 4 is not correct.'
    assert np.isclose(dist2[5], 20), 'Distance 5 is not correct.'
    assert np.isclose(dist2[6], 10), 'Distance 6 is not correct.'
    assert np.isclose(dist2[7], 20), 'Distance 7 is not correct.'
    assert np.isclose(dist2[8], 100), 'Distance 8 is not correct.'
    assert np.isclose(dist2[9], 200), 'Distance 9 is not correct.'
    assert np.isclose(dist2[10], 100), 'Distance 10 is not correct.'
    assert np.isclose(dist2[11], 200), 'Distance 11 is not correct.'
def test_two_chunks_two_dim():
    """Test kNN with two chunks of 2D data in the same call."""
    n_chunks = 2 * 8
    pointset1 = np.array(  # this is data for two chunks
        [[1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2],
         [1, 1, -1, -1, 1, 1, -1, -1]]).T.copy()
    pointset1 = np.tile(pointset1, (n_chunks // 2, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    # Points:   X    Y                   y
    #           1    1                   |  o o
    #         1.1    1                   |
    #          -1   -1               ----+----x
    #        -1.2   -1                   |
    #                                o o |
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[0], 0.1), 'Distance 0 not correct.'
    assert np.isclose(dist1[1], 0.1), 'Distance 1 not correct.'
    assert np.isclose(dist1[2], 0.2), 'Distance 2 not correct.'
    assert np.isclose(dist1[3], 0.2), 'Distance 3 not correct.'
    assert np.isclose(dist1[4], 0.1), 'Distance 4 not correct.'
    assert np.isclose(dist1[5], 0.1), 'Distance 5 not correct.'
    assert np.isclose(dist1[6], 0.2), 'Distance 6 not correct.'
    assert np.isclose(dist1[7], 0.2), 'Distance 7 not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks)
    assert np.isclose(dist2[0], 0.1), 'Distance 0 not correct.'
    assert np.isclose(dist2[1], 0.1), 'Distance 1 not correct.'
    assert np.isclose(dist2[2], 0.2), 'Distance 2 not correct.'
    assert np.isclose(dist2[3], 0.2), 'Distance 3 not correct.'
    assert np.isclose(dist2[4], 0.1), 'Distance 4 not correct.'
    assert np.isclose(dist2[5], 0.1), 'Distance 5 not correct.'
    assert np.isclose(dist2[6], 0.2), 'Distance 6 not correct.'
    assert np.isclose(dist2[7], 0.2), 'Distance 7 not correct.'
def test_two_chunks_odd_dim():
    """Test kNN with two chunks of data with odd dimension.

    The third dimension varies by only ~0.01 between points, so it never
    dominates the max-norm distance; expected distances equal the 2D case.
    """
    n_chunks = 2 * 8
    pointset1 = np.array([  # this is data for two chunks
        [1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2],
        [1, 1, -1, -1, 1, 1, -1, -1],
        [1.02, 1.03, 1.04, 1.05, 1.02, 1.03, 1.04, 1.05]]).T.copy()
    pointset1 = np.tile(pointset1, (n_chunks // 2, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    # Points:   X    Y      Z             y
    #           1    1    1.02            |  o o
    #         1.1    1    1.03            |
    #          -1   -1   -1.04        ----+----x
    #        -1.2   -1   -1.05            |
    #                                 o o |
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[0], 0.1), 'Distance 0 ist not correct.'
    assert np.isclose(dist1[1], 0.1), 'Distance 1 ist not correct.'
    assert np.isclose(dist1[2], 0.2), 'Distance 2 ist not correct.'
    assert np.isclose(dist1[3], 0.2), 'Distance 3 ist not correct.'
    assert np.isclose(dist1[4], 0.1), 'Distance 4 ist not correct.'
    assert np.isclose(dist1[5], 0.1), 'Distance 5 ist not correct.'
    assert np.isclose(dist1[6], 0.2), 'Distance 6 ist not correct.'
    assert np.isclose(dist1[7], 0.2), 'Distance 7 ist not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks)
    assert np.isclose(dist2[0], 0.1), 'Distance 0 ist not correct.'
    assert np.isclose(dist2[1], 0.1), 'Distance 1 ist not correct.'
    assert np.isclose(dist2[2], 0.2), 'Distance 2 ist not correct.'
    assert np.isclose(dist2[3], 0.2), 'Distance 3 ist not correct.'
    assert np.isclose(dist2[4], 0.1), 'Distance 4 ist not correct.'
    assert np.isclose(dist2[5], 0.1), 'Distance 5 ist not correct.'
    assert np.isclose(dist2[6], 0.2), 'Distance 6 ist not correct.'
    assert np.isclose(dist2[7], 0.2), 'Distance 7 ist not correct.'
def test_multiple_runs_two_dim():
    """Test kNN with two chunks of 2D data in the same call.

    Uses a small max_mem so that the many chunks force multiple GPU runs.
    """
    # NOTE(review): these local settings use the key 'knn_k' while the
    # module-level settings use 'kraskov_k' — confirm the estimators read
    # 'knn_k'; otherwise k silently falls back to the estimator default.
    settings = {
        'theiler_t': 0,
        'knn_k': 1,
        'gpu_id': 0,
        'debug': True,
        'return_counts': True,
        'max_mem': 5 * 1024 * 1024}
    # Shadows the module-level estimators on purpose (different settings).
    EST_MI = OpenCLKraskovMI(settings)
    EST_CMI = OpenCLKraskovCMI(settings)
    n_chunks = 50000
    pointset1 = np.array(
        [[-1, 0.5, 1.1, 2, 10, 11, 10.5, -100, -50, 666, 9999, 9999],
         [-1, 0.5, 1.1, 2, 98, -9, -200, 45.3, -53, 0.1, 9999, 9999]]).T.copy()
    pointset1 = np.tile(pointset1, (n_chunks, 1))
    pointset2 = np.ones(pointset1.shape) * 9999
    pointset3 = np.ones(pointset1.shape) * 9999
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[0], 1.5), 'Distances 0 not correct.'
    assert np.isclose(dist1[1], 0.6), 'Distances 1 not correct.'
    assert np.isclose(dist1[2], 0.6), 'Distances 2 not correct.'
    assert np.isclose(dist1[3], 0.9), 'Distances 3 not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset3, n_chunks=n_chunks)
    assert np.isclose(dist2[0], 1.5), 'Distances 0 not correct.'
    assert np.isclose(dist2[1], 0.6), 'Distances 1 not correct.'
    assert np.isclose(dist2[2], 0.6), 'Distances 2 not correct.'
    assert np.isclose(dist2[3], 0.9), 'Distances 3 not correct.'
def test_three_large_chunks():
    """Test kNN with three large chunks, put test points at chunk end."""
    n_chunks = 3
    chunk_length = 50000  # add noise to beginning of chunks to achieve this
    # Data for three individual chunks; the four interesting points sit at
    # the very end of each chunk, after a long run of far-away filler.
    chunk1 = np.expand_dims(
        np.hstack((np.ones(chunk_length-4)*9999, [5, 6, -5, -7])), axis=1)
    chunk2 = np.expand_dims(
        np.hstack((np.ones(chunk_length-4)*9999, [50, -50, 60, -70])), axis=1)
    chunk3 = np.expand_dims(
        np.hstack((np.ones(chunk_length-4)*9999, [500, -500, 600, -700])), axis=1)
    pointset1 = np.vstack([chunk1, chunk2, chunk3])  # multiply chunk
    pointset2 = np.ones(pointset1.shape) * 9999
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[chunk_length-4], 1), 'Distance 0 is not correct.'
    assert np.isclose(dist1[chunk_length-3], 1), 'Distance 1 is not correct.'
    assert np.isclose(dist1[chunk_length-2], 2), 'Distance 2 is not correct.'
    assert np.isclose(dist1[chunk_length-1], 2), 'Distance 3 is not correct.'
    assert np.isclose(dist1[chunk_length*2-4], 10), 'Distance 4 is not correct.'
    assert np.isclose(dist1[chunk_length*2-3], 20), 'Distance 5 is not correct.'
    assert np.isclose(dist1[chunk_length*2-2], 10), 'Distance 6 is not correct.'
    assert np.isclose(dist1[chunk_length*2-1], 20), 'Distance 7 is not correct.'
    assert np.isclose(dist1[-4], 100), 'Distance 8 is not correct.'
    assert np.isclose(dist1[-3], 200), 'Distance 9 is not correct.'
    assert np.isclose(dist1[-2], 100), 'Distance 10 is not correct.'
    assert np.isclose(dist1[-1], 200), 'Distance 11 is not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist2[chunk_length-4], 1), 'Distance 0 is not correct.'
    assert np.isclose(dist2[chunk_length-3], 1), 'Distance 1 is not correct.'
    assert np.isclose(dist2[chunk_length-2], 2), 'Distance 2 is not correct.'
    assert np.isclose(dist2[chunk_length-1], 2), 'Distance 3 is not correct.'
    assert np.isclose(dist2[chunk_length*2-4], 10), 'Distance 4 is not correct.'
    assert np.isclose(dist2[chunk_length*2-3], 20), 'Distance 5 is not correct.'
    assert np.isclose(dist2[chunk_length*2-2], 10), 'Distance 6 is not correct.'
    assert np.isclose(dist2[chunk_length*2-1], 20), 'Distance 7 is not correct.'
    assert np.isclose(dist2[-4], 100), 'Distance 8 is not correct.'
    assert np.isclose(dist2[-3], 200), 'Distance 9 is not correct.'
    assert np.isclose(dist2[-2], 100), 'Distance 10 is not correct.'
    assert np.isclose(dist2[-1], 200), 'Distance 11 is not correct.'
def test_two_large_chunks_two_dim():
    """Test kNN with two large chunks of 2D data in the same call, put test points at chunk end.

    Each chunk is padded at the front with noise points (value 9999) so the
    chunk exceeds the estimator's internal block size; the four real test
    points sit at the end of each chunk and their expected nearest-neighbour
    distances are asserted for both the MI and the CMI estimator.
    """
    n_chunks = 2
    chunk_length = 50000  # add noise to beginning of chunks to achieve this
    chunk = np.array(  # this is data for a single chunk
        [np.hstack((np.ones(chunk_length - 4) * 9999, [1, 1.1, -1, -1.2])),
         np.hstack((np.ones(chunk_length - 4) * 9999, [1, 1, -1, -1]))]).T.copy()
    pointset1 = np.tile(chunk, (n_chunks, 1))  # multiply chunk
    pointset2 = np.ones(pointset1.shape) * 9999
    # Points:   X    Y                   y
    #           1    1                   |  o o
    #           1.1  1                   |
    #          -1   -1          ----+----x
    #          -1.2 -1               |
    #                           o o  |
    # Call MI estimator
    mi, dist1, npoints_x, npoints_y = EST_MI.estimate(
        pointset1, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist1[chunk_length - 4], 0.1), 'Distance 0 not correct.'
    assert np.isclose(dist1[chunk_length - 3], 0.1), 'Distance 1 not correct.'
    assert np.isclose(dist1[chunk_length - 2], 0.2), 'Distance 2 not correct.'
    assert np.isclose(dist1[chunk_length - 1], 0.2), 'Distance 3 not correct.'
    assert np.isclose(dist1[-4], 0.1), 'Distance 4 not correct.'
    assert np.isclose(dist1[-3], 0.1), 'Distance 5 not correct.'
    assert np.isclose(dist1[-2], 0.2), 'Distance 6 not correct.'
    assert np.isclose(dist1[-1], 0.2), 'Distance 7 not correct.'
    # Call CMI estimator with pointset2 as conditional (otherwise the MI
    # estimator is called internally and the CMI estimator is never tested).
    # Pass n_chunks by keyword for consistency with every other estimate()
    # call in this file (and to guard against signature changes).
    cmi, dist2, npoints_x, npoints_y, npoints_c = EST_CMI.estimate(
        pointset1, pointset2, pointset2, n_chunks=n_chunks)
    assert np.isclose(dist2[chunk_length - 4], 0.1), 'Distance 0 not correct.'
    assert np.isclose(dist2[chunk_length - 3], 0.1), 'Distance 1 not correct.'
    assert np.isclose(dist2[chunk_length - 2], 0.2), 'Distance 2 not correct.'
    assert np.isclose(dist2[chunk_length - 1], 0.2), 'Distance 3 not correct.'
    assert np.isclose(dist2[-4], 0.1), 'Distance 4 not correct.'
    assert np.isclose(dist2[-3], 0.1), 'Distance 5 not correct.'
    assert np.isclose(dist2[-2], 0.2), 'Distance 6 not correct.'
    assert np.isclose(dist2[-1], 0.2), 'Distance 7 not correct.'
if __name__ == '__main__':
    # Run the whole suite directly, large-chunk cases first (same order
    # as the original explicit call list).
    all_tests = (
        test_three_large_chunks,
        test_two_large_chunks_two_dim,
        test_random_data,
        test_knn_one_dim,
        test_knn_two_dim,
        test_two_chunks_odd_dim,
        test_two_chunks_two_dim,
        test_two_chunks,
        test_three_chunks,
        test_one_dim_longer_sequence,
        test_two_dim_longer_sequence,
        test_multiple_runs_two_dim,
    )
    for run_test in all_tests:
        run_test()
| gpl-3.0 |
chetan51/neon | neon/util/batch_writer.py | 10 | 12166 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Process macro batches of data in a pipelined fashion.
"""
import logging
from glob import glob
import functools
import gzip
from multiprocessing import Pool
import numpy as np
import os
import tarfile
import struct
from PIL import Image as PILImage
from neon.util.compat import range, StringIO
from neon.util.persist import load_obj, save_obj
from neon.data import load_i1kmeta
from neon.util.argparser import NeonArgparser
# Command-line interface: batch writing is driven entirely by these flags.
parser = NeonArgparser(__doc__)
parser.add_argument('--set_type', help='(i1k|directory)', required=True,
                    choices=['i1k', 'directory'])
parser.add_argument('--image_dir', help='Directory to find images', required=True)
parser.add_argument('--target_size', type=int, default=256,
                    help='Size in pixels to scale images (Must be 256 for i1k dataset)')
parser.add_argument('--macro_size', type=int, default=5000, help='Images per processed batch')
parser.add_argument('--file_pattern', default='*.jpg', help='Image extension to include in'
                    'directory crawl')
# NOTE(review): arguments are parsed at import time, so importing this module
# without the required flags exits — presumably intentional for a script.
args = parser.parse_args()
# Root logger; severity/handlers are configured by the calling application.
logger = logging.getLogger()
# NOTE: We have to leave this helper function out of the class to use multiprocess pool.map
def proc_img(target_size, squarecrop, is_string=False, imgfile=None):
    """Rescale (and optionally centre-crop) one image, returning JPEG bytes.

    Kept at module scope (not a method) so multiprocessing.Pool.map can
    pickle it.

    Arguments:
        target_size (int): length in pixels of the shorter side after scaling.
        squarecrop (bool): if truthy, centre-crop the scaled image to a
            target_size x target_size square.
        is_string (bool): if True, ``imgfile`` holds raw image bytes rather
            than a filename/file object.
        imgfile: filename, open file object, or raw image bytes.

    Returns:
        The processed image re-encoded as a JPEG byte string.
    """
    imgfile = StringIO(imgfile) if is_string else imgfile
    im = PILImage.open(imgfile)
    # Scale so the shorter side equals target_size, preserving aspect ratio.
    scale_factor = target_size / np.float32(min(im.size))
    (wnew, hnew) = map(lambda x: int(round(scale_factor * x)), im.size)
    if scale_factor != 1:
        # BICUBIC for upsampling, ANTIALIAS for downsampling.
        filt = PILImage.BICUBIC if scale_factor > 1 else PILImage.ANTIALIAS
        im = im.resize((wnew, hnew), filt)
    # Truthiness test instead of `is True` so any truthy flag enables the
    # crop (identity comparison against the True singleton is an anti-idiom).
    if squarecrop:
        (cx, cy) = map(lambda x: (x - target_size) // 2, (wnew, hnew))
        im = im.crop((cx, cy, cx + target_size, cy + target_size))
    buf = StringIO()
    # subsampling=0 / quality=95 keeps re-encoding losses minimal.
    im.save(buf, format='JPEG', subsampling=0, quality=95)
    return buf.getvalue()
class BatchWriter(object):
    """Convert a directory tree of labelled images into neon macro batches.

    Expects images laid out as ``image_dir/<label>/<image>``; each
    subdirectory name becomes a class label.  Produces, in ``out_dir``:
    gzipped train/validation CSV index files, binary macro-batch files of
    JPEG-encoded images, and a pickled metadata cache describing the set.
    """

    def __init__(self, out_dir, image_dir, target_size=256, squarecrop=True, validation_pct=0.2,
                 class_samples_max=None, file_pattern='*.jpg', macro_size=3072):
        self.out_dir = os.path.expanduser(out_dir)
        self.image_dir = os.path.expanduser(image_dir)
        self.macro_size = macro_size  # images per macro batch
        self.num_workers = 8  # worker processes used for JPEG re-encoding
        self.target_size = target_size  # shorter-side size after rescale
        self.squarecrop = squarecrop  # centre-crop scaled images to a square
        self.file_pattern = file_pattern
        self.class_samples_max = class_samples_max  # cap per class (None = no cap)
        self.validation_pct = validation_pct  # fraction of each class held out
        self.train_file = os.path.join(self.out_dir, 'train_file.csv.gz')
        self.val_file = os.path.join(self.out_dir, 'val_file.csv.gz')
        self.meta_file = os.path.join(self.out_dir, 'dataset_cache.pkl')
        self.global_mean = None  # filled in by subclasses / metadata, if at all
        self.batch_prefix = 'data_batch_'

    def write_csv_files(self):
        """Crawl image_dir and emit shuffled train/validation CSV indexes.

        Side effects: sets label_names/label_dict, train/val record counts,
        batch counts and batch start indices on self.
        """
        # Get the labels as the subdirs
        subdirs = glob(os.path.join(self.image_dir, '*'))
        self.label_names = sorted(map(lambda x: os.path.basename(x), subdirs))
        indexes = range(len(self.label_names))
        self.label_dict = {k: v for k, v in zip(self.label_names, indexes)}
        tlines = []
        vlines = []
        for subdir in subdirs:
            subdir_label = self.label_dict[os.path.basename(subdir)]
            files = glob(os.path.join(subdir, self.file_pattern))
            np.random.shuffle(files)
            if self.class_samples_max is not None:
                files = files[:self.class_samples_max]
            lines = [(filename, subdir_label) for filename in files]
            # First validation_pct of the shuffled class goes to validation,
            # so the split is per-class (stratified).
            v_idx = int(self.validation_pct * len(lines))
            tlines += lines[v_idx:]
            vlines += lines[:v_idx]
        np.random.shuffle(tlines)
        np.random.shuffle(vlines)
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)
        for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]):
            with gzip.open(ff, 'wb') as f:
                f.write('filename,l_id\n')
                for tup in ll:
                    f.write('{},{}\n'.format(*tup))
        self.train_nrec = len(tlines)
        # Ceiling division: number of macro batches needed.
        self.ntrain = -(-self.train_nrec // self.macro_size)
        self.train_start = 0
        self.val_nrec = len(vlines)
        self.nval = -(-self.val_nrec // self.macro_size)
        # Validation batch indices start at a power of ten above the training
        # range so the two sets of batch file names can never collide.
        self.val_start = 10 ** int(np.log10(self.ntrain * 10))

    def parse_file_list(self, infile):
        """Read a CSV index and return (image file names, labels dict).

        Also sets self.nclass from the largest label id seen.
        """
        lines = np.loadtxt(infile, delimiter=',', skiprows=1, dtype={'names': ('fname', 'l_id'),
                                                                     'formats': (object, 'i4')})
        imfiles = [l[0] for l in lines]
        labels = {'l_id': [l[1] for l in lines]}
        self.nclass = {'l_id': (max(labels['l_id']) + 1)}
        return imfiles, labels

    def write_batches(self, name, offset, labels, imfiles):
        """Re-encode images with a worker pool and write macro-batch files.

        :param name: human-readable set name, used in progress output only.
        :param offset: numeric index of the first batch file to write.
        :param labels: dict of label-key -> per-image label list (aligned
            with imfiles).
        :param imfiles: list of file names or tar members to process.
        """
        pool = Pool(processes=self.num_workers)
        npts = -(-len(imfiles) // self.macro_size)  # ceiling division
        starts = [i * self.macro_size for i in range(npts)]
        is_tar = isinstance(imfiles[0], tarfile.ExFileObject)
        proc_img_func = functools.partial(proc_img, self.target_size, self.squarecrop, is_tar)
        # Slice inputs and labels into macro-batch-sized groups.
        imfiles = [imfiles[s:s + self.macro_size] for s in starts]
        labels = [{k: v[s:s + self.macro_size] for k, v in labels.iteritems()} for s in starts]
        print("Writing %s batches..." % (name))
        for i, jpeg_file_batch in enumerate(imfiles):
            if is_tar:
                # Tar members must be read in this process; only raw bytes
                # can be shipped to the worker processes.
                jpeg_file_batch = [j.read() for j in jpeg_file_batch]
            jpeg_strings = pool.map(proc_img_func, jpeg_file_batch)
            bfile = os.path.join(self.out_dir, '%s%d' % (self.batch_prefix, offset + i))
            self.write_binary(jpeg_strings, labels[i], bfile)
            print("Writing batch %d" % (i))
        pool.close()

    def write_binary(self, jpegs, labels, ofname):
        """Serialize one macro batch (labels + JPEG strings) to ``ofname``.

        Binary layout: image count, key count; then per label key its
        length-prefixed name and the uint32 label array; then each JPEG as a
        length-prefixed byte string.
        """
        num_imgs = len(jpegs)
        keylist = ['l_id']
        with open(ofname, 'wb') as f:
            f.write(struct.pack('I', num_imgs))
            f.write(struct.pack('I', len(keylist)))
            for key in keylist:
                ksz = len(key)
                f.write(struct.pack('L' + 'B' * ksz, ksz, *bytearray(key)))
                f.write(struct.pack('I' * num_imgs, *labels[key]))
            for i in range(num_imgs):
                jsz = len(jpegs[i])
                bin = struct.pack('I' + 'B' * jsz, jsz, *bytearray(jpegs[i]))
                f.write(bin)

    def save_meta(self):
        """Pickle the dataset statistics needed by the neon batch loader."""
        save_obj({'ntrain': self.ntrain,
                  'nval': self.nval,
                  'train_start': self.train_start,
                  'val_start': self.val_start,
                  'macro_size': self.macro_size,
                  'batch_prefix': self.batch_prefix,
                  'global_mean': self.global_mean,
                  'label_dict': self.label_dict,
                  'label_names': self.label_names,
                  'val_nrec': self.val_nrec,
                  'train_nrec': self.train_nrec,
                  'img_size': self.target_size,
                  'nclass': self.nclass}, self.meta_file)

    def run(self):
        """Top-level driver: index the images, write all batches, save meta."""
        self.write_csv_files()
        namelist = ['train', 'validation']
        filelist = [self.train_file, self.val_file]
        startlist = [self.train_start, self.val_start]
        for sname, fname, start in zip(namelist, filelist, startlist):
            print("%s %s %s" % (sname, fname, start))
            if fname is not None and os.path.exists(fname):
                imgs, labels = self.parse_file_list(fname)
                self.write_batches(sname, start, labels, imgs)
            else:
                print("Skipping %s, file missing" % (sname))
        self.save_meta()
class BatchWriterImagenet(BatchWriter):
    """BatchWriter specialised for the ILSVRC2012 (ImageNet 1K) tarballs."""

    # code below adapted from Alex Krizhevsky's cuda-convnet2 library,
    # make-data.py
    # Copyright 2014 Google Inc. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    ##########################################################################
    def run(self):
        """Convert the ImageNet train/validation tars into macro batches."""
        load_dir = self.image_dir
        train_tar = os.path.join(load_dir, 'ILSVRC2012_img_train.tar')
        validation_tar = os.path.join(load_dir, 'ILSVRC2012_img_val.tar')
        for infile in (train_tar, validation_tar):
            if not os.path.exists(infile):
                raise IOError(infile + " not found. Please ensure you have ImageNet downloaded."
                              "More info here: http://www.image-net.org/download-imageurls")
        # download our version of the metadata
        meta_dir = load_i1kmeta(self.out_dir)
        meta_file = os.path.join(meta_dir, 'neon_ILSVRC2012_devmeta.pkl')
        self.meta = load_obj(meta_file)
        self.__dict__.update(self.meta)  # get label_dict, label_names, global_mean from meta
        np.random.seed(0)  # fixed seed so the shuffled batch order is reproducible
        with tarfile.open(train_tar) as tf:
            # The train tar contains one inner tar per synset (class).
            s_sets = tf.getmembers()
            s_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in s_sets]
            print('Building trainset list from synset tars.')
            t_jpegfiles = []
            totalsz = len(s_tars)
            for i, st in enumerate(s_tars):
                if i % 100 == 0:
                    print("%d%% ..." % (int(round((100.0 * i) / totalsz))))
                t_jpegfiles += [st.extractfile(m) for m in st.getmembers()]
                st.close()
            print("Done loading")
            np.random.shuffle(t_jpegfiles)
            # First 9 characters of each member name are the synset id, which
            # keys into label_dict loaded from the metadata above.
            train_labels = [[self.label_dict[j.name[:9]]] for j in t_jpegfiles]
            self.train_nrec = len(t_jpegfiles)
            self.ntrain = -(-self.train_nrec // self.macro_size)  # ceiling division
            self.nclass = {'l_id': 1000}
            self.train_start = 0
            train_labels = {'l_id': np.array(train_labels, dtype=np.int32)}
            self.write_batches('train', self.train_start, train_labels, t_jpegfiles)
        with tarfile.open(validation_tar) as tf:
            # Sort validation images by name; val_ground_truth (loaded from
            # the metadata) is assumed aligned with that order — TODO confirm.
            jpegfiles = sorted([tf.extractfile(m) for m in tf.getmembers()], key=lambda x: x.name)
            self.val_nrec = len(jpegfiles)
            self.nval = -(-self.val_nrec // self.macro_size)
            # Start validation batch indices above the training index range.
            self.val_start = 10 ** int(np.log10(self.ntrain) + 1)
            val_labels = {'l_id': np.array(self.val_ground_truth, dtype=np.int32)}
            self.write_batches('val', self.val_start, val_labels, jpegfiles)
        self.save_meta()
if __name__ == "__main__":
# Supply dataset type and location
if args.set_type == 'i1k':
bw = BatchWriterImagenet(out_dir=args.data_dir, image_dir=args.image_dir,
macro_size=args.macro_size)
else:
bw = BatchWriter(out_dir=args.data_dir, image_dir=args.image_dir,
target_size=args.target_size, macro_size=args.macro_size,
file_pattern=args.file_pattern)
bw.run()
| apache-2.0 |
antsmc2/mics | survey/forms/location_hierarchy.py | 2 | 1708 | from django import forms
from django.forms.formsets import BaseFormSet
from rapidsms.contrib.locations.models import Location, LocationType
from survey.models import LocationTypeDetails
class BaseArticleFormSet(BaseFormSet):
    """Formset-wide validation for location-hierarchy level forms."""

    def clean(self):
        """Reject the first form with empty levels, or a checked has_code
        without a code length; errors are attached to that form."""
        for index in range(self.total_form_count()):
            form = self.forms[index]
            has_code = form.cleaned_data.get('has_code', None)
            code = form.cleaned_data.get('length_of_code', '')
            levels = form.cleaned_data.get('levels', '')
            if not levels.strip():
                message = "field cannot be empty."
                form._errors["levels"] = form.error_class([message])
                raise forms.ValidationError(message)
            if has_code and not code:
                message = "length of code cannot be blank if has code is checked."
                form._errors["length_of_code"] = form.error_class([message])
                raise forms.ValidationError(message)
class LocationHierarchyForm(forms.Form):
    """Form for choosing the country at the top of the location hierarchy."""

    def __init__(self, data=None):
        super(LocationHierarchyForm, self).__init__(data=data)
        self.fields['country'] = forms.ChoiceField(
            label='Country', choices=self.get_country_choices(),
            widget=forms.Select, required=True)

    def get_country_choices(self):
        """Return (id, name) pairs; a country already configured in
        LocationTypeDetails takes precedence over the full country list."""
        existing_country_details = LocationTypeDetails.objects.exclude(country=None)
        if existing_country_details:
            configured = existing_country_details[0].country
            return [(configured.id, configured.name)]
        return [(country.id, country.name)
                for country in Location.objects.filter(type__name__iexact='country')]
"""Democracy Database (DemDb)
See:
https://github.com/jollyrogue/demdb
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py, so file reads work from any CWD.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='demdb',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.0',

    description='Democracy Database (DemDb) a web application to manage campaigns',
    long_description=long_description,

    # The project's main homepage.
    url='https://github.com/jollyrogue/demdb',

    # Author details
    author='Ryan Quinn',
    author_email='quinn.rm@gmail.com',

    # Choose your license
    license='AGPLv3+',

    # Classifiers below are corrected to match the official trove registry at
    # https://pypi.org/classifiers/ — PyPI rejects uploads with unknown ones.
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        # (was 'Intended Audience :: End Users', which is not a registered
        # classifier)
        'Intended Audience :: End Users/Desktop',
        # (was 'Topic :: Internet :: WWW/HTTP :: Application', which is not
        # a registered classifier)
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',

        # Pick your license as you wish (should match "license" above)
        # (was missing the 'v3' required by the registered classifier name)
        'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],

    # What does your project relate to?
    keywords='web application wsgi campaign',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['flask', 'flask-restful', 'psycopg2'],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    #   $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    #package_data={
    #    'sample': ['package_data.dat'],
    #},

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'demdb=demdb:main',
        ],
    },
)
| agpl-3.0 |
""" A script for testing DraftRegistrationApprovals. Automatically adds comments to and rejects
pending DraftRegistrationApprovals
"""
import sys
import logging
import datetime as dt
from website.app import init_app
from website.models import DraftRegistration, Sanction, User
logger = logging.getLogger(__name__)
# Keep output quiet: warnings only, and additionally suppress everything at
# INFO level and below across all loggers.
logging.basicConfig(level=logging.WARN)
logging.disable(level=logging.INFO)
def add_comments(draft):
    """Attach one canned test comment to every answer in *draft* and save.

    All answers share the same single-comment list, mirroring the structure
    the UI writes into ``registration_metadata``.
    """
    canned_comments = [{
        'user': {
            'id': 'itsMe',
            'name': 'Mario!'
        },
        'value': 'Ahoy! This is a comment!',
        'lastModified': dt.datetime.utcnow().isoformat()
    }]
    for question_id, answer in draft.registration_metadata.iteritems():
        answer['comments'] = canned_comments
    draft.save()
def main(dry_run=True):
    """Reject every draft registration whose approval is still pending.

    Adds test comments to each such draft, then forcibly rejects its
    approval sanction.  With ``dry_run`` (the default) nothing is persisted.
    """
    if dry_run:
        logger.warn('DRY RUN mode')
    pending_approval_drafts = DraftRegistration.find()
    # Only drafts that require approval and whose sanction is still
    # UNAPPROVED need any action.
    need_approval_drafts = [draft for draft in pending_approval_drafts
                            if draft.requires_approval and draft.approval and draft.approval.state == Sanction.UNAPPROVED]
    for draft in need_approval_drafts:
        add_comments(draft)
        sanction = draft.approval
        try:
            if not dry_run:
                sanction.forcibly_reject()
                #manually do the on_reject functionality to prevent send_mail problems
                sanction.meta = {}
                sanction.save()
                draft.approval = None
                draft.save()
            logger.warn('Rejected {0}'.format(draft._id))
        except Exception as e:
            # Best effort: log the failure and continue with remaining drafts.
            logger.error(e)
if __name__ == '__main__':
    # Read the flag before app initialisation, then boot and run.
    wants_dry_run = 'dry' in sys.argv
    init_app(routes=False)
    main(dry_run=wants_dry_run)
| apache-2.0 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
    """Unit tests for waiters.wait_for_image_status using a mocked client."""

    def setUp(self):
        super(TestImageWaiters, self).setUp()
        self.client = mock.MagicMock()
        self.client.build_timeout = 1
        self.client.build_interval = 1

    def test_wait_for_image_status(self):
        """An already-active image must return without waiting."""
        self.client.get_image.return_value = (None, {'status': 'active'})
        start_time = int(time.time())
        waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
        end_time = int(time.time())
        # Ensure waiter returns before build_timeout. assertLess (rather than
        # assertTrue on a comparison) reports the actual elapsed time on
        # failure. 10s is a generous bound versus the 1s build_timeout.
        self.assertLess(end_time - start_time, 10)

    def test_wait_for_image_status_timeout(self):
        """An image stuck in 'saving' must raise TimeoutException."""
        self.client.get_image.return_value = (None, {'status': 'saving'})
        self.assertRaises(exceptions.TimeoutException,
                          waiters.wait_for_image_status,
                          self.client, 'fake_image_id', 'active')

    def test_wait_for_image_status_error_on_image_create(self):
        """An image that transitions to ERROR must raise AddImageException."""
        self.client.get_image.return_value = (None, {'status': 'ERROR'})
        self.assertRaises(exceptions.AddImageException,
                          waiters.wait_for_image_status,
                          self.client, 'fake_image_id', 'active')
| apache-2.0 |
from django.forms.models import ModelFormMetaclass, ModelForm
from django.template import RequestContext, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.xheaders import populate_xheaders
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.utils.translation import ugettext
from django.contrib.auth.views import redirect_to_login
from django.views.generic import GenericViewError
from django.contrib import messages
import warnings
# Emit the deprecation notice once, at import time of this module.
warnings.warn(
    'Function-based generic views have been deprecated; use class-based views instead.',
    PendingDeprecationWarning
)
def apply_extra_context(extra_context, context):
    """
    Adds items from extra_context dict to context. If a value in extra_context
    is callable, then it is called and the result is added to context.
    """
    # items() rather than iteritems() so this helper also runs on Python 3;
    # on Python 2 the extra list allocation is negligible here.
    for key, value in extra_context.items():
        context[key] = value() if callable(value) else value
def get_model_and_form_class(model, form_class):
    """
    Returns a model and form class based on the model and form_class
    parameters that were passed to the generic view.

    If ``form_class`` is given then its associated model will be returned along
    with ``form_class`` itself. Otherwise, if ``model`` is given, ``model``
    itself will be returned along with a ``ModelForm`` class created from
    ``model``.

    Raises GenericViewError when neither argument is supplied.
    """
    if form_class:
        return form_class._meta.model, form_class
    if model:
        # The inner Meta class fails if model = model is used for some reason.
        # (the class body would resolve the name to the attribute being
        # defined, not the enclosing function's argument)
        tmp_model = model
        # TODO: we should be able to construct a ModelForm without creating
        # and passing in a temporary inner class.
        class Meta:
            model = tmp_model
        class_name = model.__name__ + 'Form'
        # Build the ModelForm subclass dynamically via its metaclass.
        form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta})
        return model, form_class
    raise GenericViewError("Generic view must be called with either a model or"
                           " form_class argument.")
def redirect(post_save_redirect, obj):
    """
    Build the HttpResponseRedirect issued after a successful save.

    ``post_save_redirect`` may contain %-style placeholders filled from
    ``obj``'s field values. When it is None, fall back to the object's own
    ``get_absolute_url()``; if that is missing too, raise
    ImproperlyConfigured. Handles the post_save_redirect parameter of the
    ``create_object`` and ``update_object`` views.
    """
    if post_save_redirect:
        return HttpResponseRedirect(post_save_redirect % obj.__dict__)
    if hasattr(obj, 'get_absolute_url'):
        return HttpResponseRedirect(obj.get_absolute_url())
    raise ImproperlyConfigured(
        "No URL to redirect to. Either pass a post_save_redirect"
        " parameter to the generic view or define a get_absolute_url"
        " method on the Model.")
def lookup_object(model, object_id, slug, slug_field):
    """
    Fetch a single ``model`` instance, by primary key or by slug.

    ``object_id`` takes precedence; otherwise ``slug``/``slug_field`` are
    used. Raises GenericViewError when neither lookup is possible and
    Http404 when the lookup matches nothing.
    """
    if object_id:
        lookup_kwargs = {'%s__exact' % model._meta.pk.name: object_id}
    elif slug and slug_field:
        lookup_kwargs = {'%s__exact' % slug_field: slug}
    else:
        raise GenericViewError(
            "Generic view must be called with either an object_id or a"
            " slug/slug_field.")
    try:
        return model.objects.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise Http404("No %s found for %s"
                      % (model._meta.verbose_name, lookup_kwargs))
def create_object(request, model=None, template_name=None,
        template_loader=loader, extra_context=None, post_save_redirect=None,
        login_required=False, context_processors=None, form_class=None):
    """
    Generic object-creation function.

    On GET, renders an unbound form; on valid POST, saves the new object and
    redirects (see ``redirect``). An invalid POST falls through and
    re-renders the bound form with its errors.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)

    model, form_class = get_model_and_form_class(model, form_class)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES)
        if form.is_valid():
            new_object = form.save()

            msg = ugettext("The %(verbose_name)s was created successfully.") %\
                                    {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, new_object)
    else:
        form = form_class()

    # Create the template, context, response
    if not template_name:
        template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'form': form,
    }, context_processors)
    apply_extra_context(extra_context, c)
    return HttpResponse(t.render(c))
def update_object(request, model=None, object_id=None, slug=None,
        slug_field='slug', template_name=None, template_loader=loader,
        extra_context=None, post_save_redirect=None, login_required=False,
        context_processors=None, template_object_name='object',
        form_class=None):
    """
    Generic object-update function.

    Looks up the object by id or slug, renders a form bound to it, and on
    valid POST saves the changes and redirects (see ``redirect``). An
    invalid POST re-renders the bound form with its errors.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
        object
            the original object being edited
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)

    model, form_class = get_model_and_form_class(model, form_class)
    obj = lookup_object(model, object_id, slug, slug_field)

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            obj = form.save()
            msg = ugettext("The %(verbose_name)s was updated successfully.") %\
                                    {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, obj)
    else:
        form = form_class(instance=obj)

    if not template_name:
        template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'form': form,
        template_object_name: obj,
    }, context_processors)
    apply_extra_context(extra_context, c)
    response = HttpResponse(t.render(c))
    # Legacy ETag/Last-Modified headers keyed to the object's primary key.
    populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
    return response
def delete_object(request, model, post_delete_redirect, object_id=None,
        slug=None, slug_field='slug', template_name=None,
        template_loader=loader, extra_context=None, login_required=False,
        context_processors=None, template_object_name='object'):
    """
    Generic object-delete function.

    The given template will be used to confirm deletion if this view is
    fetched using GET; for safety, deletion will only be performed if this
    view is POSTed.

    Templates: ``<app_label>/<model_name>_confirm_delete.html``
    Context:
        object
            the original object being deleted
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)

    obj = lookup_object(model, object_id, slug, slug_field)

    if request.method == 'POST':
        obj.delete()
        msg = ugettext("The %(verbose_name)s was deleted.") %\
                                    {"verbose_name": model._meta.verbose_name}
        messages.success(request, msg, fail_silently=True)
        return HttpResponseRedirect(post_delete_redirect)
    else:
        # GET: render the confirmation page instead of deleting.
        if not template_name:
            template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
        t = template_loader.get_template(template_name)
        c = RequestContext(request, {
            template_object_name: obj,
        }, context_processors)
        apply_extra_context(extra_context, c)
        response = HttpResponse(t.render(c))
        # Legacy ETag/Last-Modified headers keyed to the object's primary key.
        populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
        return response
import os
from setuptools import setup, find_packages
# Directory containing this setup.py, so file reads work from any CWD.
here = os.path.abspath(os.path.dirname(__file__))
# Long description shown on PyPI is the README plus the changelog.
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
# Run-time dependencies installed by pip alongside the package.
requires = [
    'pyramid>=1.4',
    'pyramid_chameleon',  # for compatibility with pyramid 1.5 branch
    'SQLAlchemy>=0.8',
    'transaction',
    'pyramid_tm',
    'pyramid_debugtoolbar>=1.0.8',
    'zope.sqlalchemy',
    'waitress',
    'repoze.timeago',
    'cryptacular',
    ]

setup(name='birdie',
      version='0.1',
      description='birdie',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        ],
      author='gr-',
      author_email='guillaume.raschia@gmail.com',
      url='https://github.com/gr-',
      keywords='web wsgi bfg pylons pyramid',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='birdie',
      install_requires=requires,
      # Paste entry point serves the app; console script seeds the database.
      entry_points="""\
      [paste.app_factory]
      main = birdie:main
      [console_scripts]
      initialize_birdie_db = birdie.scripts.initializedb:main
      """,
      )
from .. import InstanceResource, ListResource
class IpAccessControlListMapping(InstanceResource):
    """A single IpAccessControlList associated with a SIP Domain."""

    def delete(self):
        """
        Remove this mapping (disassociate the ACL from the Domain).
        """
        return self.parent.delete_instance(self.name)
class IpAccessControlListMappings(ListResource):
    """The collection of IpAccessControlLists mapped onto a SIP Domain."""

    name = "IpAccessControlListMappings"
    key = "ip_access_control_list_mappings"
    instance = IpAccessControlListMapping

    def create(self, ip_access_control_list_sid, **kwargs):
        """Add an :class:`IpAccessControlListMapping` to this domain.

        :param ip_access_control_list_sid: String identifier for an
            existing IpAccessControlList.
        """
        body = dict(kwargs,
                    ip_access_control_list_sid=ip_access_control_list_sid)
        return self.create_instance(body)

    def delete(self, sid):
        """Remove an :class:`IpAccessControlListMapping` from this domain.

        :param sid: String identifier for the mapping to remove.
        """
        return self.delete_instance(sid)
class CredentialListMapping(InstanceResource):
    """A single CredentialList associated with a SIP Domain."""

    def delete(self):
        """
        Remove this mapping (disassociate the CredentialList from the Domain).
        """
        return self.parent.delete_instance(self.name)
class CredentialListMappings(ListResource):
    """The collection of CredentialLists mapped onto a SIP Domain."""

    name = "CredentialListMappings"
    key = "credential_list_mappings"
    instance = CredentialListMapping

    def create(self, credential_list_sid, **kwargs):
        """Add a :class:`CredentialListMapping` to this domain.

        :param credential_list_sid: String identifier for an existing
            :class:`CredentialList`.
        """
        body = dict(kwargs, credential_list_sid=credential_list_sid)
        return self.create_instance(body)

    def delete(self, sid):
        """Remove a :class:`CredentialListMapping` from this domain.

        :param sid: String identifier for the mapping to remove.
        """
        return self.delete_instance(sid)
class Domain(InstanceResource):
    """An inbound SIP Domain.

    .. attribute:: sid

        A 34 character string that uniquely identifies this resource.

    .. attribute:: account_sid

        The unique id of the Account responsible for this domain.

    .. attribute:: domain_name

        A unique domain name for this inbound SIP endpoint. Must end in
        .sip.twilio.com.

    .. attribute:: friendly_name

        A human-readable name for this SIP domain. (restrictions?)

    .. attribute:: auth_type

        (Not documented here -- see the Twilio REST API reference.)

    .. attribute:: voice_url

        The URL Twilio will request when this domain receives a call.

    .. attribute:: voice_method

        The HTTP method Twilio will use when requesting the above voice_url.
        Either GET or POST.

    .. attribute:: voice_fallback_url

        The URL that Twilio will request if an error occurs retrieving or
        executing the TwiML requested by voice_url.

    .. attribute:: voice_fallback_method

        The HTTP method Twilio will use when requesting the voice_fallback_url.
        Either GET or POST.

    .. attribute:: voice_status_callback

        The URL that Twilio will request to pass status parameters (such as
        call ended) to your application.

    .. attribute:: voice_status_callback_method

        The HTTP method Twilio will use to make requests to the status_callback
        URL. Either GET or POST.

    .. attribute:: date_created

        The date that this resource was created.

    .. attribute:: date_updated

        The date that this resource was last updated.
    """
    # Subresource lists are instantiated per-domain by the base class.
    subresources = [IpAccessControlListMappings, CredentialListMappings]

    def update(self, **kwargs):
        """
        Update this :class:`Domain`.

        Available attributes to update are described above as instance
        attributes.
        """
        return self.parent.update_instance(self.name, kwargs)

    def delete(self):
        """
        Delete this domain.
        """
        return self.parent.delete_instance(self.name)
class Domains(ListResource):
    """List resource over the inbound SIP :class:`Domain` instances of an
    account."""
    name = "Domains"
    key = "domains"
    instance = Domain

    def create(self, domain_name, **kwargs):
        """ Create a :class:`Domain`.

        :param str domain_name: A unique domain name ending in
            '.sip.twilio.com'
        :param str friendly_name: A human-readable name for this domain.
        :param str voice_url: The URL Twilio will request when this domain
            receives a call.
        :param voice_method: The HTTP method Twilio should use to request
            voice_url.
        :type voice_method: None (defaults to 'POST'), 'GET', or 'POST'
        :param str voice_fallback_url: A URL that Twilio will request if an
            error occurs requesting or executing the TwiML at voice_url
        :param str voice_fallback_method: The HTTP method that Twilio should
            use to request the fallback_url
        :type voice_fallback_method: None (defaults to 'POST'),
            'GET', or 'POST'
        :param str voice_status_callback: A URL that Twilio will request when
            the call ends to notify your app.
        :param str voice_status_method: The HTTP method Twilio should use when
            requesting the above URL.
        """
        # domain_name is required by the API, so it is folded into the
        # free-form kwargs before posting.
        kwargs['domain_name'] = domain_name
        return self.create_instance(kwargs)

    def update(self, sid, **kwargs):
        """
        Update a :class:`Domain`.

        Available attributes to update are described above in :meth:`create`.

        :param sid: String identifier for a Domain resource
        """
        return self.update_instance(sid, kwargs)

    def delete(self, sid):
        """
        Delete a :class:`Domain`.

        :param sid: String identifier for a Domain resource
        """
        return self.delete_instance(sid)
| mit |
DennisDenuto/puppet-commonscripts | files/aws_cli/AWS-ElasticBeanstalk-CLI-2.6.3/eb/macosx/python3/lib/aws/requests/packages/charade/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
    # Probes byte streams for the EUC-TW (Taiwanese) encoding, combining a
    # coding state machine with a character-distribution analyser.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCTWSMModel)
        self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
        # reset() uses the two members above, so it must run last.
        self.reset()

    def get_charset_name(self):
        # Canonical charset name reported when this prober wins.
        return "EUC-TW"
| mit |
Cactuslegs/audacity-of-nope | lib-src/lv2/serd/waflib/Tools/ccroot.py | 14 | 12438 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Utils,Node,Errors
from waflib.TaskGen import after_method,before_method,feature,taskgen_method,extension
from waflib.Tools import c_aliases,c_preproc,c_config,c_osx,c_tests
from waflib.Configure import conf
# Default system directories searched by read_shlib/read_stlib (process_lib).
SYSTEM_LIB_PATHS=['/usr/lib64','/usr/lib','/usr/local/lib64','/usr/local/lib']
# Maps a feature name to the set of configuration variables (env keys) that
# propagate_uselib_vars folds into the command line for that feature.
USELIB_VARS=Utils.defaultdict(set)
USELIB_VARS['c']=set(['INCLUDES','FRAMEWORKPATH','DEFINES','CPPFLAGS','CCDEPS','CFLAGS','ARCH'])
USELIB_VARS['cxx']=set(['INCLUDES','FRAMEWORKPATH','DEFINES','CPPFLAGS','CXXDEPS','CXXFLAGS','ARCH'])
USELIB_VARS['d']=set(['INCLUDES','DFLAGS'])
USELIB_VARS['includes']=set(['INCLUDES','FRAMEWORKPATH','ARCH'])
# Link-time variables, by kind of binary produced.
USELIB_VARS['cprogram']=USELIB_VARS['cxxprogram']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS','FRAMEWORK','FRAMEWORKPATH','ARCH'])
USELIB_VARS['cshlib']=USELIB_VARS['cxxshlib']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS','FRAMEWORK','FRAMEWORKPATH','ARCH'])
USELIB_VARS['cstlib']=USELIB_VARS['cxxstlib']=set(['ARFLAGS','LINKDEPS'])
USELIB_VARS['dprogram']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
USELIB_VARS['dshlib']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
USELIB_VARS['dstlib']=set(['ARFLAGS','LINKDEPS'])
USELIB_VARS['asm']=set(['ASFLAGS'])
@taskgen_method
def create_compiled_task(self, name, node):
    """
    Create a task of class *name* compiling *node* into an object file
    named '<source>.<idx>.o' beside it, and record the task on
    ``self.compiled_tasks`` so the link step can collect its output.
    """
    obj_name = '%s.%d.o' % (node.name, self.idx)
    tsk = self.create_task(name, node, node.parent.find_or_declare(obj_name))
    existing = getattr(self, 'compiled_tasks', None)
    if existing is None:
        self.compiled_tasks = [tsk]
    else:
        existing.append(tsk)
    return tsk
@taskgen_method
def to_incnodes(self, inlst):
    """
    Convert include entries (strings or Nodes) into a list of Nodes,
    de-duplicating while preserving order. Relative strings are resolved
    against this task generator's path (both build and source variants);
    a leading '#' anchors them at the project top-level instead.
    """
    lst = []
    seen = set([])
    for x in self.to_list(inlst):
        if x in seen or not x:
            continue  # skip duplicates and empty entries
        seen.add(x)
        if isinstance(x, Node.Node):
            lst.append(x)
        else:
            if os.path.isabs(x):
                lst.append(self.bld.root.make_node(x) or x)
            else:
                if x[0] == '#':
                    # project-root-relative: add both build and source nodes
                    p = self.bld.bldnode.make_node(x[1:])
                    v = self.bld.srcnode.make_node(x[1:])
                else:
                    # generator-relative: add both build and source nodes
                    p = self.path.get_bld().make_node(x)
                    v = self.path.make_node(x)
                if p.is_child_of(self.bld.bldnode):
                    p.mkdir()  # the build-side include dir must exist
                lst.append(p)
                lst.append(v)
    return lst
@feature('c','cxx','d','asm','fc','includes')
@after_method('propagate_uselib_vars','process_source')
def apply_incpaths(self):
    """
    Resolve the 'includes' attribute plus env.INCLUDES into nodes
    (stored on self.includes_nodes) and set env.INCPATHS to their
    absolute paths for use on compiler command lines.
    """
    lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES'])
    self.includes_nodes = lst
    self.env['INCPATHS'] = [x.abspath() for x in lst]
class link_task(Task.Task):
    """Base class for tasks producing a linked binary (program/library)."""
    color = 'YELLOW'
    # Default installation destination; overridden by subclasses or by the
    # task generator's install_path attribute.
    inst_to = None
    chmod = Utils.O755

    def add_target(self, target):
        """
        Declare the output node for *target*. String targets are expanded
        through the platform pattern (e.g. 'main' -> 'libmain.so') stored
        in env under '<task class name>_PATTERN'.
        """
        if isinstance(target, str):
            pattern = self.env[self.__class__.__name__ + '_PATTERN']
            if not pattern:
                pattern = '%s'
            folder, name = os.path.split(target)
            if self.__class__.__name__.find('shlib') > 0:
                if self.env.DEST_BINFMT == 'pe' and getattr(self.generator, 'vnum', None):
                    # Windows DLLs embed the major version number in the name
                    name = name + '-' + self.generator.vnum.split('.')[0]
            tmp = folder + os.sep + pattern % name
            target = self.generator.path.find_or_declare(tmp)
        self.set_outputs(target)
class stlink_task(link_task):
    """Static-library link task: archives the object files with 'ar'."""
    run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}'

def rm_tgt(cls):
    # Wrap cls.run so the target archive is removed first: 'ar' would
    # otherwise append to a stale archive instead of replacing it.
    old = cls.run
    def wrap(self):
        try: os.remove(self.outputs[0].abspath())
        except OSError: pass
        return old(self)
    setattr(cls, 'run', wrap)
rm_tgt(stlink_task)
@feature('c','cxx','d','fc','asm')
@after_method('process_source')
def apply_link(self):
    """
    Pick the link task class matching this generator's features, create
    the link task over all compiled object files, and schedule the
    installation of the result.
    """
    for x in self.features:
        # promote C link classes to their C++ variants when both are present
        if x == 'cprogram' and 'cxx' in self.features:
            x = 'cxxprogram'
        elif x == 'cshlib' and 'cxx' in self.features:
            x = 'cxxshlib'
        if x in Task.classes:
            if issubclass(Task.classes[x], link_task):
                link = x
                break
    else:
        return  # no linkable feature: nothing to do
    objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])]
    self.link_task = self.create_task(link, objs)
    self.link_task.add_target(self.target)
    try:
        inst_to = self.install_path
    except AttributeError:
        inst_to = self.link_task.__class__.inst_to
    if inst_to:
        self.install_task = self.bld.install_files(inst_to, self.link_task.outputs[:], env=self.env, chmod=self.link_task.chmod)
@taskgen_method
def use_rec(self, name, **kw):
    """
    Recursively classify one entry of the 'use' attribute. Names that
    match a task generator are posted and recorded (with precedence
    constraints in self.tmp_use_prec); unknown names fall back to plain
    uselib entries.
    """
    if name in self.tmp_use_not or name in self.tmp_use_seen:
        return  # already classified during this traversal
    try:
        y = self.bld.get_tgen_by_name(name)
    except Errors.WafError:
        # not a task generator: treat as a system library / uselib flag set
        self.uselib.append(name)
        self.tmp_use_not.add(name)
        return
    self.tmp_use_seen.append(name)
    y.post()
    y.tmp_use_objects = objects = kw.get('objects', True)
    y.tmp_use_stlib = stlib = kw.get('stlib', True)
    try:
        link_task = y.link_task
    except AttributeError:
        y.tmp_use_var = ''  # nothing linkable (headers/objects only)
    else:
        objects = False
        if not isinstance(link_task, stlink_task):
            stlib = False
            y.tmp_use_var = 'LIB'
        else:
            y.tmp_use_var = 'STLIB'
    p = self.tmp_use_prec
    for x in self.to_list(getattr(y, 'use', [])):
        try:
            p[x].append(name)
        except KeyError:
            p[x] = [name]
        self.use_rec(x, objects=objects, stlib=stlib)
@feature('c','cxx','d','use','fc')
@before_method('apply_incpaths','propagate_uselib_vars')
@after_method('apply_link','process_source')
def process_use(self):
    """
    Resolve the 'use' attribute: walk the dependency graph (use_rec),
    sort it topologically, then propagate libraries, library paths,
    object files and exported includes from the used task generators
    into this one.
    """
    use_not = self.tmp_use_not = set([])
    self.tmp_use_seen = []
    use_prec = self.tmp_use_prec = {}
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    self.includes = self.to_list(getattr(self, 'includes', []))
    names = self.to_list(getattr(self, 'use', []))
    for x in names:
        self.use_rec(x)
    for x in use_not:
        if x in use_prec:
            del use_prec[x]
    # Topological sort over the precedence constraints gathered above.
    out = []
    tmp = []
    for x in self.tmp_use_seen:
        for k in use_prec.values():
            if x in k:
                break
        else:
            tmp.append(x)  # no incoming constraint: a valid starting point
    while tmp:
        e = tmp.pop()
        out.append(e)
        try:
            nlst = use_prec[e]
        except KeyError:
            pass
        else:
            del use_prec[e]
            for x in nlst:
                for y in use_prec:
                    if x in use_prec[y]:
                        break
                else:
                    tmp.append(x)
    if use_prec:
        raise Errors.WafError('Cycle detected in the use processing %r' % use_prec)
    out.reverse()
    link_task = getattr(self, 'link_task', None)
    for x in out:
        y = self.bld.get_tgen_by_name(x)
        var = y.tmp_use_var
        if var and link_task:
            if var == 'LIB' or y.tmp_use_stlib:
                # link against the library and record where to find it
                self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]])
                self.link_task.dep_nodes.extend(y.link_task.outputs)
                tmp_path = y.link_task.outputs[0].parent.path_from(self.bld.bldnode)
                self.env.append_value(var + 'PATH', [tmp_path])
        else:
            if y.tmp_use_objects:
                self.add_objects_from_tgen(y)
        if getattr(y, 'export_includes', None):
            self.includes.extend(y.to_incnodes(y.export_includes))
    for x in names:
        # names that are not task generators become plain uselib entries
        try:
            y = self.bld.get_tgen_by_name(x)
        except Exception:
            if not self.env['STLIB_' + x] and not x in self.uselib:
                self.uselib.append(x)
        else:
            for k in self.to_list(getattr(y, 'uselib', [])):
                if not self.env['STLIB_' + k] and not k in self.uselib:
                    self.uselib.append(k)
@taskgen_method
def accept_node_to_link(self, node):
    """Return True for nodes that may be fed to the link task (everything
    except Windows .pdb debug-symbol files)."""
    is_pdb = node.name.endswith('.pdb')
    return not is_pdb
@taskgen_method
def add_objects_from_tgen(self, tg):
    """
    Append the object files produced by task generator *tg* to this
    generator's link task inputs (no-op when there is no link task).
    """
    try:
        link_task = self.link_task
    except AttributeError:
        pass  # nothing to link into
    else:
        for tsk in getattr(tg, 'compiled_tasks', []):
            for x in tsk.outputs:
                if self.accept_node_to_link(x):
                    link_task.inputs.append(x)
@taskgen_method
def get_uselib_vars(self):
    """Return the union of the USELIB_VARS entries for this generator's
    features (the set of env variable names to propagate)."""
    result = set()
    for feature_name in self.features:
        if feature_name in USELIB_VARS:
            result |= USELIB_VARS[feature_name]
    return result
@feature('c','cxx','d','fc','javac','cs','uselib','asm')
@after_method('process_use')
def propagate_uselib_vars(self):
    """
    Fold generator attributes (e.g. self.cflags), per-feature variables
    (e.g. CFLAGS_cshlib) and per-uselib variables (e.g. CFLAGS_ZLIB)
    into the flat env variables consumed by the command lines.
    """
    _vars = self.get_uselib_vars()
    env = self.env
    for x in _vars:
        y = x.lower()
        env.append_unique(x, self.to_list(getattr(self, y, [])))
    for x in self.features:
        for var in _vars:
            compvar = '%s_%s' % (var, x)
            env.append_value(var, env[compvar])
    for x in self.to_list(getattr(self, 'uselib', [])):
        for v in _vars:
            env.append_value(v, env[v + '_' + x])
@feature('cshlib','cxxshlib','fcshlib')
@after_method('apply_link')
def apply_implib(self):
    """
    On Windows (PE binaries), declare the import library produced next
    to a DLL, handle an optional .def file, and schedule the import
    library's installation.
    """
    if not self.env.DEST_BINFMT == 'pe':
        return
    dll = self.link_task.outputs[0]
    if isinstance(self.target, Node.Node):
        name = self.target.name
    else:
        name = os.path.split(self.target)[1]
    implib = self.env['implib_PATTERN'] % name
    implib = dll.parent.find_or_declare(implib)
    self.env.append_value('LINKFLAGS', self.env['IMPLIB_ST'] % implib.bldpath())
    self.link_task.outputs.append(implib)
    if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe':
        node = self.path.find_resource(self.defs)
        if not node:
            raise Errors.WafError('invalid def file %r' % self.defs)
        if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
            # msvc takes the .def file as a flag; other linkers as an input
            self.env.append_value('LINKFLAGS', '/def:%s' % node.path_from(self.bld.bldnode))
            self.link_task.dep_nodes.append(node)
        else:
            self.link_task.inputs.append(node)
    try:
        inst_to = self.install_path
    except AttributeError:
        inst_to = self.link_task.__class__.inst_to
    if not inst_to:
        return
    self.implib_install_task = self.bld.install_as('${LIBDIR}/%s' % implib.name, implib, self.env)
@feature('cshlib','cxxshlib','dshlib','fcshlib','vnum')
@after_method('apply_link','propagate_uselib_vars')
def apply_vnum(self):
    """
    Handle shared-library versioning on POSIX (ELF/Mach-O): create the
    versioned names (libfoo.so.1, libfoo.so.1.2.3), set the soname link
    flag, and install the symlinks alongside the real file.
    """
    if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
        return
    link = self.link_task
    nums = self.vnum.split('.')
    node = link.outputs[0]
    libname = node.name
    if libname.endswith('.dylib'):
        # Mach-O places the version before the extension
        name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
        name2 = libname.replace('.dylib', '.%s.dylib' % nums[0])
    else:
        # ELF appends the version after the extension
        name3 = libname + '.' + self.vnum
        name2 = libname + '.' + nums[0]
    if self.env.SONAME_ST:
        v = self.env.SONAME_ST % name2
        self.env.append_value('LINKFLAGS', v.split())
    self.create_task('vnum', node, [node.parent.find_or_declare(name2), node.parent.find_or_declare(name3)])
    if getattr(self.bld, 'is_install', None):
        # replace the default install with: real file + two symlinks
        self.install_task.hasrun = Task.SKIP_ME
        bld = self.bld
        path = self.install_task.dest
        t1 = bld.install_as(path + os.sep + name3, node, env=self.env, chmod=self.link_task.chmod)
        t2 = bld.symlink_as(path + os.sep + name2, name3)
        t3 = bld.symlink_as(path + os.sep + libname, name3)
        self.vnum_install_task = (t1, t2, t3)
    if '-dynamiclib' in self.env['LINKFLAGS']:
        # OS X: embed the final install path as the library's install_name
        try:
            inst_to = self.install_path
        except AttributeError:
            inst_to = self.link_task.__class__.inst_to
        if inst_to:
            p = Utils.subst_vars(inst_to, self.env)
            path = os.path.join(p, self.link_task.outputs[0].name)
            self.env.append_value('LINKFLAGS', ['-install_name', path])
class vnum(Task.Task):
    """
    Create the version symlinks next to a shared library
    (e.g. libfoo.so.1 and libfoo.so.1.2.3 -> libfoo.so), removing any
    stale file first.
    """
    color = 'CYAN'
    # Fix: the Task attribute that suppresses console output is spelled
    # 'quiet'; the original 'quient' was a typo and had no effect.
    quiet = True
    ext_in = ['.bin']

    def run(self):
        for x in self.outputs:
            path = x.abspath()
            try:
                # remove a stale file/symlink; absence is fine
                os.remove(path)
            except OSError:
                pass
            try:
                os.symlink(self.inputs[0].name, path)
            except OSError:
                return 1  # non-zero marks the task as failed
class fake_shlib(link_task):
    """Pseudo link task for a prebuilt shared library: never executes a
    command, only signs the existing files once its predecessors ran."""
    def runnable_status(self):
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER  # wait for predecessors
        for x in self.outputs:
            x.sig = Utils.h_file(x.abspath())  # hash the existing binary
        return Task.SKIP_ME  # nothing to build
class fake_stlib(stlink_task):
    """Pseudo link task for a prebuilt static library: never executes a
    command, only signs the existing files once its predecessors ran."""
    def runnable_status(self):
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER  # wait for predecessors
        for x in self.outputs:
            x.sig = Utils.h_file(x.abspath())  # hash the existing archive
        return Task.SKIP_ME  # nothing to build
@conf
def read_shlib(self, name, paths=[]):
    """Declare a prebuilt shared library *name* to link against, searched
    in *paths* then in SYSTEM_LIB_PATHS (see process_lib)."""
    return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib')
@conf
def read_stlib(self, name, paths=[]):
    """Declare a prebuilt static library *name* to link against, searched
    in *paths* then in SYSTEM_LIB_PATHS (see process_lib)."""
    return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib')
# File-name patterns tried when locating a prebuilt library, per lib type.
lib_patterns={'shlib':['lib%s.so','%s.so','lib%s.dylib','lib%s.dll','%s.dll'],'stlib':['lib%s.a','%s.a','lib%s.dll','%s.dll','lib%s.lib','%s.lib'],}
@feature('fake_lib')
def process_lib(self):
    """
    Locate the prebuilt library declared by read_shlib/read_stlib by
    trying every naming pattern in every search directory, then attach a
    fake link task over the found node.
    """
    node = None
    names = [x % self.name for x in lib_patterns[self.lib_type]]
    for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS:
        if not isinstance(x, Node.Node):
            x = self.bld.root.find_node(x) or self.path.find_node(x)
            if not x:
                continue  # search directory does not exist
        for y in names:
            node = x.find_node(y)
            if node:
                node.sig = Utils.h_file(node.abspath())
                break
        else:
            continue  # not found in this directory, try the next one
        break
    else:
        raise Errors.WafError('could not find library %r' % self.name)
    self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node])
    self.target = self.name
class fake_o(Task.Task):
    # Placeholder task for a user-provided object file: nothing to build.
    def runnable_status(self):
        return Task.SKIP_ME
@extension('.o','.obj')
def add_those_o_files(self, node):
    """Register a prebuilt object file (.o/.obj source) so that it is fed
    directly to the link task."""
    tsk = self.create_task('fake_o', [], node)
    try:
        self.compiled_tasks.append(tsk)
    except AttributeError:
        self.compiled_tasks = [tsk]
@feature('fake_obj')
@before_method('process_source')
def process_objs(self):
    """Turn every source node of a 'fake_obj' generator into a fake_o task
    and clear self.source so regular source processing is skipped."""
    for node in self.to_nodes(self.source):
        self.add_those_o_files(node)
    self.source = []
@conf
def read_object(self, obj):
    """Wrap a prebuilt object file (path string or Node) in a 'fake_obj'
    task generator so it can be referenced through 'use'."""
    if not isinstance(obj, self.path.__class__):
        obj = self.path.find_resource(obj)
    return self(features='fake_obj', source=obj, name=obj.name)
| gpl-2.0 |
stankovski/AutoRest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/CustomBaseUri/autorestparameterizedhosttestclient/operations/paths.py | 5 | 2733 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Paths(object):
    """Paths operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def get_empty(
            self, account_name, custom_headers=None, raw=False, **operation_config):
        """
        Get a 200 to test a valid base uri

        :param account_name: Account Name
        :type account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Fix: custom_headers previously defaulted to a mutable {} shared
        # across calls; None is equivalent since the dict is only read when
        # truthy, and removes the shared-state hazard.

        # Construct URL
        url = '/customuri'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'host': self._serialize.url("self.config.host", self.config.host, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
benschmaus/catapult | tracing/tracing/mre/threaded_work_queue.py | 3 | 2723 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
import traceback
import Queue
class ThreadedWorkQueue(object):
  """Work queue running posted tasks until Stop() is called.

  Tasks posted via PostMainThreadTask run on the thread that called Run();
  tasks posted via PostAnyThreadTask may run on any worker thread. With
  num_threads == 1 everything runs on the Run() caller's thread.
  """

  def __init__(self, num_threads):
    # Number of worker threads; 1 selects the fully single-threaded mode.
    self._num_threads = num_threads
    # Queues are created by Reset() below.
    self._main_thread_tasks = None
    self._any_thread_tasks = None
    self._running = False
    self._stop = False
    self._stop_result = None
    self.Reset()

  @property
  def is_running(self):
    return self._running

  def Run(self):
    """Process tasks until Stop() is called; returns Stop()'s stop_result."""
    if self.is_running:
      raise Exception('Already running')
    self._running = True
    self._stop = False
    self._stop_result = None
    if self._num_threads == 1:
      self._RunSingleThreaded()
    else:
      self._RunMultiThreaded()
    # Discard any tasks still pending when Stop() was requested.
    self._main_thread_tasks = Queue.Queue()
    self._any_thread_tasks = Queue.Queue()
    r = self._stop_result
    self._stop_result = None
    self._running = False
    return r

  def Stop(self, stop_result=None):
    """Ask Run() to return *stop_result*; False if a stop is already pending."""
    if not self.is_running:
      raise Exception('Not running')
    if self._stop:
      return False
    self._stop_result = stop_result
    self._stop = True
    return True

  def Reset(self):
    # Recreate both queues; only valid while not running.
    assert not self.is_running
    self._main_thread_tasks = Queue.Queue()
    self._any_thread_tasks = Queue.Queue()

  def PostMainThreadTask(self, cb, *args, **kwargs):
    """Schedule cb(*args, **kwargs) on the thread that runs Run()."""
    def RunTask():
      cb(*args, **kwargs)
    self._main_thread_tasks.put(RunTask)

  def PostAnyThreadTask(self, cb, *args, **kwargs):
    """Schedule cb(*args, **kwargs) on any worker thread."""
    def RunTask():
      cb(*args, **kwargs)
    self._any_thread_tasks.put(RunTask)

  def _TryToRunOneTask(self, queue, block=False):
    # Pop and execute one task. With block=True, wait at most 100 ms so a
    # worker thread can re-check self._stop periodically.
    if block:
      try:
        task = queue.get(True, 0.1)
      except Queue.Empty:
        return
    else:
      if queue.empty():
        return
      task = queue.get()
    try:
      task()
    except KeyboardInterrupt as ex:
      raise ex
    except Exception:  # pylint: disable=broad-except
      # A failing task must not kill the queue; report and continue.
      traceback.print_exc()
    finally:
      queue.task_done()

  def _RunSingleThreaded(self):
    # Drain both queues on the current thread until stopped.
    while True:
      if self._stop:
        break
      self._TryToRunOneTask(self._any_thread_tasks)
      self._TryToRunOneTask(self._main_thread_tasks)

  def _RunMultiThreaded(self):
    # Daemon workers drain the any-thread queue while this thread runs the
    # main-thread queue; after Stop(), wait for the workers to exit.
    threads = []
    for _ in range(self._num_threads):
      t = threading.Thread(target=self._ThreadMain)
      t.setDaemon(True)
      t.start()
      threads.append(t)
    while True:
      if self._stop:
        break
      self._TryToRunOneTask(self._main_thread_tasks)
    for t in threads:
      t.join()

  def _ThreadMain(self):
    # Worker loop: run any-thread tasks until a stop is requested.
    while True:
      if self._stop:
        break
      self._TryToRunOneTask(self._any_thread_tasks, block=True)
| bsd-3-clause |
punkkeks/OctoPrint | tests/filemanager/test_localstorage.py | 30 | 15870 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import unittest
import os
import mock
from ddt import ddt, unpack, data
import octoprint.filemanager.storage
class FileWrapper(object):
    """Wrap a fixture file under the ``_files`` directory next to this
    module, exposing its absolute path, its SHA1 digest and a helper to
    copy it elsewhere."""

    def __init__(self, filename):
        base_dir = os.path.dirname(os.path.realpath(__file__))
        self.path = os.path.join(base_dir, "_files", filename)

        import hashlib
        digest = hashlib.sha1()
        chunk_size = 65536
        with open(self.path, "rb") as handle:
            for chunk in iter(lambda: handle.read(chunk_size), b""):
                digest.update(chunk)
        self.hash = digest.hexdigest()

    def save(self, destination):
        import shutil
        shutil.copy(self.path, destination)
# Shared test fixtures, loaded once at import time; each wrapper caches the
# fixture file's absolute path and SHA1 hash for use in the assertions below.
FILE_BP_CASE_STL = FileWrapper("bp_case.stl")
FILE_BP_CASE_GCODE = FileWrapper("bp_case.gcode")
FILE_CRAZYRADIO_STL = FileWrapper("crazyradio.stl")
@ddt
class LocalStorageTest(unittest.TestCase):
def setUp(self):
import tempfile
self.basefolder = tempfile.mkdtemp()
self.storage = octoprint.filemanager.storage.LocalFileStorage(self.basefolder)
# mock file manager module
self.filemanager_patcher = mock.patch("octoprint.filemanager")
self.filemanager = self.filemanager_patcher.start()
self.filemanager.valid_file_type.return_value = True
def get_file_type(name):
if name.lower().endswith(".stl"):
return ["model", "stl"]
elif name.lower().endswith(".gco") or name.lower().endswith(".gcode") or name.lower.endswith(".g"):
return ["machinecode", "gcode"]
else:
return None
self.filemanager.get_file_type.side_effect = get_file_type
def tearDown(self):
import shutil
shutil.rmtree(self.basefolder)
self.filemanager_patcher.stop()
def test_add_file(self):
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
def test_add_file_overwrite(self):
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
try:
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL, overwrite=False)
except:
pass
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL, overwrite=True)
def test_add_file_with_web(self):
import time
href = "http://www.example.com"
retrieved = time.time()
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL, links=[("web", dict(href=href, retrieved=retrieved))])
stl_metadata = self.storage.get_metadata(stl_name)
self.assertIsNotNone(stl_metadata)
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertTrue("web", link["rel"])
self.assertTrue("href" in link)
self.assertEquals(href, link["href"])
self.assertTrue("retrieved" in link)
self.assertEquals(retrieved, link["retrieved"])
def test_add_file_with_association(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE, links=[("model", dict(name=stl_name))])
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
# forward link
self.assertEquals(1, len(gcode_metadata["links"]))
link = gcode_metadata["links"][0]
self.assertEquals("model", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(stl_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_STL.hash, link["hash"])
# reverse link
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertEquals("machinecode", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(gcode_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_GCODE.hash, link["hash"])
def test_remove_file(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE, links=[("model", dict(name=stl_name))])
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertIsNotNone(stl_metadata)
self.assertIsNotNone(gcode_metadata)
self.storage.remove_file(stl_name)
self.assertFalse(os.path.exists(os.path.join(self.basefolder, stl_name)))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertIsNone(stl_metadata)
self.assertIsNotNone(gcode_metadata)
self.assertEquals(0, len(gcode_metadata["links"]))
def test_add_folder(self):
self._add_folder("test", "test")
def test_add_subfolder(self):
folder_name = self._add_folder("folder with some spaces", "folder_with_some_spaces")
subfolder_name = self._add_folder((folder_name, "subfolder"), folder_name + "/subfolder")
stl_name = self._add_file((subfolder_name, "bp_case.stl"), subfolder_name + "/bp_case.stl", FILE_BP_CASE_STL)
self.assertTrue(os.path.exists(os.path.join(self.basefolder, folder_name)))
self.assertTrue(os.path.exists(os.path.join(self.basefolder, subfolder_name)))
self.assertTrue(os.path.exists(os.path.join(self.basefolder, stl_name)))
def test_remove_folder(self):
content_folder = self._add_folder("content", "content")
other_stl_name = self._add_file((content_folder, "crazyradio.stl"), content_folder + "/crazyradio.stl", FILE_CRAZYRADIO_STL)
empty_folder = self._add_folder("empty", "empty")
try:
self.storage.remove_folder(content_folder, recursive=False)
except:
self.assertTrue(os.path.exists(os.path.join(self.basefolder, content_folder)))
self.assertTrue(os.path.isdir(os.path.join(self.basefolder, content_folder)))
self.assertTrue(os.path.exists(os.path.join(self.basefolder, other_stl_name)))
self.assertIsNotNone(self.storage.get_metadata(other_stl_name))
self.storage.remove_folder(content_folder, recursive=True)
self.assertFalse(os.path.exists(os.path.join(self.basefolder, content_folder)))
self.assertFalse(os.path.isdir(os.path.join(self.basefolder, content_folder)))
self.storage.remove_folder(empty_folder, recursive=False)
self.assertFalse(os.path.exists(os.path.join(self.basefolder, empty_folder)))
self.assertFalse(os.path.isdir(os.path.join(self.basefolder, empty_folder)))
def test_remove_folder_with_metadata(self):
content_folder = self._add_folder("content", "content")
other_stl_name = self._add_file((content_folder, "crazyradio.stl"), content_folder + "/crazyradio.stl", FILE_CRAZYRADIO_STL)
self.storage.remove_file(other_stl_name)
self.storage.remove_folder(content_folder, recursive=False)
def test_list(self):
bp_case_stl = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE, links=[("model", dict(name=bp_case_stl))])
content_folder = self._add_folder("content", "content")
self._add_file((content_folder, "crazyradio.stl"), content_folder + "/crazyradio.stl", FILE_CRAZYRADIO_STL)
self._add_folder("empty", "empty")
file_list = self.storage.list_files()
self.assertEquals(4, len(file_list))
self.assertTrue("bp_case.stl" in file_list)
self.assertTrue("bp_case.gcode" in file_list)
self.assertTrue("content" in file_list)
self.assertTrue("empty" in file_list)
self.assertEquals("model", file_list["bp_case.stl"]["type"])
self.assertEquals(FILE_BP_CASE_STL.hash, file_list["bp_case.stl"]["hash"])
self.assertEquals("machinecode", file_list["bp_case.gcode"]["type"])
self.assertEquals(FILE_BP_CASE_GCODE.hash, file_list["bp_case.gcode"]["hash"])
self.assertEquals("folder", file_list[content_folder]["type"])
self.assertEquals(1, len(file_list[content_folder]["children"]))
self.assertTrue("crazyradio.stl" in file_list["content"]["children"])
self.assertEquals("model", file_list["content"]["children"]["crazyradio.stl"]["type"])
self.assertEquals(FILE_CRAZYRADIO_STL.hash, file_list["content"]["children"]["crazyradio.stl"]["hash"])
self.assertEquals("folder", file_list["empty"]["type"])
self.assertEquals(0, len(file_list["empty"]["children"]))
def test_add_link_model(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE)
self.storage.add_link(gcode_name, "model", dict(name=stl_name))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
# forward link
self.assertEquals(1, len(gcode_metadata["links"]))
link = gcode_metadata["links"][0]
self.assertEquals("model", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(stl_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_STL.hash, link["hash"])
# reverse link
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertEquals("machinecode", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(gcode_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_GCODE.hash, link["hash"])
def test_add_link_machinecode(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE)
self.storage.add_link(stl_name, "machinecode", dict(name=gcode_name))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
# forward link
self.assertEquals(1, len(gcode_metadata["links"]))
link = gcode_metadata["links"][0]
self.assertEquals("model", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(stl_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_STL.hash, link["hash"])
# reverse link
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertEquals("machinecode", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(gcode_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_GCODE.hash, link["hash"])
def test_remove_link(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
self.storage.add_link(stl_name, "web", dict(href="http://www.example.com"))
self.storage.add_link(stl_name, "web", dict(href="http://www.example2.com"))
stl_metadata = self.storage.get_metadata(stl_name)
self.assertEquals(2, len(stl_metadata["links"]))
self.storage.remove_link(stl_name, "web", dict(href="http://www.example.com"))
stl_metadata = self.storage.get_metadata(stl_name)
self.assertEquals(1, len(stl_metadata["links"]))
self.storage.remove_link(stl_name, "web", dict(href="wrong_href"))
stl_metadata = self.storage.get_metadata(stl_name)
self.assertEquals(1, len(stl_metadata["links"]))
def test_remove_link_bidirectional(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE)
self.storage.add_link(stl_name, "machinecode", dict(name=gcode_name))
self.storage.add_link(stl_name, "web", dict(href="http://www.example.com"))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertEquals(1, len(gcode_metadata["links"]))
self.assertEquals(2, len(stl_metadata["links"]))
self.storage.remove_link(gcode_name, "model", dict(name=stl_name, hash=FILE_BP_CASE_STL.hash))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertEquals(0, len(gcode_metadata["links"]))
self.assertEquals(1, len(stl_metadata["links"]))
@data(
("some_file.gco", "some_file.gco"),
("some_file with (parentheses) and ümläuts and digits 123.gco", "some_file_with_(parentheses)_and_mluts_and_digits_123.gco"),
("pengüino pequeño.stl", "pengino_pequeo.stl")
)
@unpack
def test_sanitize_name(self, input, expected):
actual = self.storage.sanitize_name(input)
self.assertEquals(expected, actual)
@data(
"some/folder/still/left.gco",
"also\\no\\backslashes.gco"
)
def test_sanitize_name_invalid(self, input):
try:
self.storage.sanitize_name(input)
self.fail("expected a ValueError")
except ValueError as e:
self.assertEquals("name must not contain / or \\", e.message)
@data(
("folder/with/subfolder", "/folder/with/subfolder"),
("folder/with/subfolder/../other/folder", "/folder/with/other/folder"),
("/folder/with/leading/slash", "/folder/with/leading/slash"),
("folder/with/leading/dot", "/folder/with/leading/dot")
)
@unpack
def test_sanitize_path(self, input, expected):
actual = self.storage.sanitize_path(input)
self.assertTrue(actual.startswith(self.basefolder))
self.assertEquals(expected, actual[len(self.basefolder):].replace(os.path.sep, "/"))
@data(
"../../folder/out/of/the/basefolder",
"some/folder/../../../and/then/back"
)
def test_sanitize_path_invalid(self, input):
try:
self.storage.sanitize_path(input)
self.fail("expected a ValueError")
except ValueError as e:
self.assertTrue(e.message.startswith("path not contained in base folder: "))
    @data(
        ("some/folder/and/some file.gco", "/some/folder/and", "some_file.gco"),
        (("some", "folder", "and", "some file.gco"), "/some/folder/and", "some_file.gco"),
        ("some file.gco", "/", "some_file.gco"),
        (("some file.gco",), "/", "some_file.gco"),
        ("", "/", ""),
        ("some/folder/with/trailing/slash/", "/some/folder/with/trailing/slash", ""),
        (("some", "folder", ""), "/some/folder", "")
    )
    @unpack
    def test_sanitize(self, input, expected_path, expected_name):
        """sanitize() accepts either a slash-joined string or a tuple of
        segments and splits it into a (folder path, file name) 2-tuple
        anchored below the base folder; a trailing slash / empty last
        segment yields an empty file name."""
        actual = self.storage.sanitize(input)
        self.assertTrue(isinstance(actual, tuple))
        self.assertEquals(2, len(actual))
        actual_path, actual_name = actual
        # the folder part must live inside the base folder ...
        self.assertTrue(actual_path.startswith(self.basefolder))
        # ... so strip the base folder off and normalize separators before
        # comparing against the platform-independent expectations above
        actual_path = actual_path[len(self.basefolder):].replace(os.path.sep, "/")
        if not actual_path.startswith("/"):
            # if the actual path originally was just the base folder, we just stripped
            # away everything, so let's add a / again so the behaviour matches the
            # other preprocessing of our test data here
            actual_path = "/" + actual_path
        self.assertEquals(expected_path, actual_path)
        self.assertEquals(expected_name, actual_name)
def _add_file(self, path, expected_path, file_object, links=None, overwrite=False):
sanitized_path = self.storage.add_file(path, file_object, links=links, allow_overwrite=overwrite)
split_path = sanitized_path.split("/")
if len(split_path) == 1:
file_path = os.path.join(self.basefolder, split_path[0])
folder_path = self.basefolder
else:
file_path = os.path.join(self.basefolder, os.path.join(*split_path))
folder_path = os.path.join(self.basefolder, os.path.join(*split_path[:-1]))
self.assertEquals(expected_path, sanitized_path)
self.assertTrue(os.path.exists(file_path))
self.assertTrue(os.path.exists(os.path.join(folder_path, ".metadata.yaml")))
metadata = self.storage.get_metadata(sanitized_path)
self.assertIsNotNone(metadata)
# assert hash
self.assertTrue("hash" in metadata)
self.assertEquals(file_object.hash, metadata["hash"])
# assert presence of links if supplied
if links:
self.assertTrue("links" in metadata)
return sanitized_path
def _add_folder(self, path, expected_path):
sanitized_path = self.storage.add_folder(path)
self.assertEquals(expected_path, sanitized_path)
self.assertTrue(os.path.exists(os.path.join(self.basefolder, os.path.join(*sanitized_path.split("/")))))
self.assertTrue(os.path.isdir(os.path.join(self.basefolder, os.path.join(*sanitized_path.split("/")))))
return sanitized_path
| agpl-3.0 |
with-git/tensorflow | tensorflow/python/training/saver_large_variable_test.py | 163 | 2326 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargeVariableTest(test.TestCase):
  # NOTE: kept separate from saver_test.py because the large allocations
  # do not play well with TSAN and cause flaky failures.

  def testLargeVariable(self):
    """A single exactly-2GB variable must fail to save with the V1 format,
    since the serialized checkpoint also carries header metadata that
    pushes the slice past the 2GB protobuf limit."""
    checkpoint_path = os.path.join(self.get_temp_dir(), "large_variable")
    with session.Session("", graph=ops.Graph()) as sess:
      with ops.device("/cpu:0"):
        # 2 * 1024^3 bool elements == exactly 2GB of tensor data.
        big_var = variables.Variable(
            constant_op.constant(
                False, shape=[2, 1024, 1024, 1024], dtype=dtypes.bool))
      v1_saver = saver.Saver({big_var.op.name: big_var},
                             write_version=saver_pb2.SaverDef.V1)
      big_var.initializer.run()
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Tensor slice is too large to serialize"):
        v1_saver.save(sess, checkpoint_path)
if __name__ == "__main__":
  # Delegate to TensorFlow's test runner (a thin unittest wrapper).
  test.main()
| apache-2.0 |
yusiwen/oh-my-zsh | plugins/aliases/termcolor.py | 168 | 5044 | # coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <konstantin.lepa@gmail.com>
"""ANSII Color formatting for output in terminal."""
from __future__ import print_function
import os
# Public API of the module. The original spelling __ALL__ was a typo:
# Python only honors __all__ when resolving "from termcolor import *",
# so the export list was silently ineffective. __ALL__ is kept as an
# alias in case anything referenced the historical misspelling.
__all__ = ['colored', 'cprint']
__ALL__ = __all__
VERSION = (1, 1, 0)
# ANSI SGR codes for text attributes. The original built this with
# zip()/range() plus a del of placeholder keys; the resulting mapping is
# spelled out literally here (codes 3 and 6 are intentionally unused).
ATTRIBUTES = {
    'bold': 1,
    'dark': 2,
    'underline': 4,
    'blink': 5,
    'reverse': 7,
    'concealed': 8,
}

# ANSI SGR codes for background (highlight) colors.
HIGHLIGHTS = {
    'on_grey': 40,
    'on_red': 41,
    'on_green': 42,
    'on_yellow': 43,
    'on_blue': 44,
    'on_magenta': 45,
    'on_cyan': 46,
    'on_white': 47,
}

# ANSI SGR codes for foreground colors.
COLORS = {
    'grey': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
}

RESET = '\033[0m'


def colored(text, color=None, on_color=None, attrs=None):
    """Colorize text with ANSI escape sequences.

    color: one of the COLORS keys (red, green, yellow, blue, magenta,
    cyan, white, grey); on_color: one of the HIGHLIGHTS keys (on_red,
    on_green, ...); attrs: iterable of ATTRIBUTES keys (bold, dark,
    underline, blink, reverse, concealed).

    Setting the ANSI_COLORS_DISABLED environment variable returns the
    text unchanged.

    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', 'green')
    """
    if os.getenv('ANSI_COLORS_DISABLED') is not None:
        return text
    fmt_str = '\033[%dm%s'
    if color is not None:
        text = fmt_str % (COLORS[color], text)
    if on_color is not None:
        text = fmt_str % (HIGHLIGHTS[on_color], text)
    # attributes are applied in order, each wrapping the previous result
    for attr in (attrs or []):
        text = fmt_str % (ATTRIBUTES[attr], text)
    return text + RESET


def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
    """Print colorized text; extra keyword arguments go to print()."""
    print(colored(text, color, on_color, attrs), **kwargs)
if __name__ == '__main__':
    # Manual smoke test: prints every color / highlight / attribute
    # combination so the output can be eyeballed in a real terminal.
    print('Current terminal type: %s' % os.getenv('TERM'))
    print('Test basic colors:')
    cprint('Grey color', 'grey')
    cprint('Red color', 'red')
    cprint('Green color', 'green')
    cprint('Yellow color', 'yellow')
    cprint('Blue color', 'blue')
    cprint('Magenta color', 'magenta')
    cprint('Cyan color', 'cyan')
    cprint('White color', 'white')
    print(('-' * 78))
    print('Test highlights:')
    cprint('On grey color', on_color='on_grey')
    cprint('On red color', on_color='on_red')
    cprint('On green color', on_color='on_green')
    cprint('On yellow color', on_color='on_yellow')
    cprint('On blue color', on_color='on_blue')
    cprint('On magenta color', on_color='on_magenta')
    cprint('On cyan color', on_color='on_cyan')
    cprint('On white color', color='grey', on_color='on_white')
    print('-' * 78)
    print('Test attributes:')
    cprint('Bold grey color', 'grey', attrs=['bold'])
    cprint('Dark red color', 'red', attrs=['dark'])
    cprint('Underline green color', 'green', attrs=['underline'])
    cprint('Blink yellow color', 'yellow', attrs=['blink'])
    cprint('Reversed blue color', 'blue', attrs=['reverse'])
    cprint('Concealed Magenta color', 'magenta', attrs=['concealed'])
    cprint('Bold underline reverse cyan color', 'cyan',
           attrs=['bold', 'underline', 'reverse'])
    cprint('Dark blink concealed white color', 'white',
           attrs=['dark', 'blink', 'concealed'])
    print(('-' * 78))
    print('Test mixing:')
    cprint('Underline red on grey color', 'red', 'on_grey',
           ['underline'])
    cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])
| mit |
woobe/h2o | py/testdir_hosts/test_parse_summary_airline_s3n_fvec.py | 2 | 3452 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
    """Imports airline CSVs from s3n into a live H2O cloud, then parses,
    inspects and summarizes each one (Python 2 / legacy H2O test)."""

    def tearDown(self):
        # fail the test if H2O wrote errors into its sandbox logs
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # assume we're at 0xdata with it's hdfs namenode
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1)
        else:
            # all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
            h2o_hosts.build_cloud_with_hosts()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_parse_summary_airline_s3n_fvec(self):
        h2o.beta_features = True
        # (csv file name, per-file timeout in seconds)
        csvFilelist = [
            ("allyears2k.csv", 300), #4.4MB
            ("year1987.csv", 600), #130MB
            ("allyears.csv", 900), #12GB
            # ("allyears_10.csv", 1800), #119.98GB
        ]
        print "Need the trailing / on the bucket, to avoid 'Path must be absolute error'"
        (importHDFSResult, importPattern) = h2i.import_only(bucket='h2o-airlines-unpacked/', path="*", schema='s3n')
        print "\nTrying StoreView after the import hdfs"
        h2o_cmd.runStoreView(timeoutSecs=120)
        trial = 0
        for (csvFilename, timeoutSecs) in csvFilelist:
            trialStart = time.time()
            csvPathname = csvFilename
            # PARSE****************************************
            # NOTE(review): csvPathname is assigned the same value twice
            # (here and just above) — looks like leftover duplication
            csvPathname = csvFilename
            hex_key = csvFilename + "_" + str(trial) + ".hex"
            start = time.time()
            parseResult = h2i.import_parse(bucket='h2o-airlines-unpacked', path=csvPathname, schema='s3n', hex_key=hex_key,
                timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=120)
            elapsed = time.time() - start
            print "parse end on ", parseResult['destination_key'], 'took', elapsed, 'seconds',\
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
            # INSPECT******************************************
            # We should be able to see the parse result?
            start = time.time()
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
            print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
            h2o_cmd.infoFromInspect(inspect, csvPathname)
            # gives us some reporting on missing values, constant values, to see if we have x specified well
            # figures out everything from parseResult['destination_key']
            # needs y to avoid output column (which can be index or name)
            # assume all the configs have the same y..just check with the firs tone
            goodX = h2o_glm.goodXFromColumnInfo(y='IsArrDelayed', key=parseResult['destination_key'], timeoutSecs=300)
            # SUMMARY****************************************
            summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=360)
            h2o_cmd.infoFromSummary(summaryResult)
            # STOREVIEW***************************************
            print "\nTrying StoreView after the parse"
            h2o_cmd.runStoreView(timeoutSecs=120)
            print "Trial #", trial, "completed in", time.time() - trialStart, "seconds."
            trial += 1
if __name__ == '__main__':
    # run via H2O's unittest wrapper (handles cloud build/teardown)
    h2o.unit_main()
| apache-2.0 |
haematologic/cellcountr | cellcounter/main/migrations/0001_initial.py | 2 | 19590 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: creates the initial tables
    for the main app (cell count instances, findings, stains, images).
    Generated code — do not hand-edit beyond regeneration."""

    def forwards(self, orm):
        # Creates every model table plus the M2M join table; mirrored by
        # backwards() below.
        # Adding model 'CellCountInstance'
        db.create_table('main_cellcountinstance', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('datetime_submitted', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('datetime_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('tissue_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('overall_comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['CellCountInstance'])
        # Adding model 'BoneMarrowBackground'
        db.create_table('main_bonemarrowbackground', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('trail_cellularity', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('particle_cellularity', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('particulate', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('haemodilution', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('site', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('ease_of_aspiration', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal('main', ['BoneMarrowBackground'])
        # Adding model 'CellType'
        db.create_table('main_celltype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('readable_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('machine_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['CellType'])
        # Adding model 'CellCount'
        db.create_table('main_cellcount', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellCountInstance'])),
            ('cell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellType'])),
            ('normal_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('abnormal_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['CellCount'])
        # Adding model 'ErythropoiesisFindings'
        db.create_table('main_erythropoiesisfindings', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('nuclear_asynchrony', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('multinucleated_forms', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('ragged_haemoglobinisation', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('megaloblastic_change', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['ErythropoiesisFindings'])
        # Adding model 'GranulopoiesisFindings'
        db.create_table('main_granulopoiesisfindings', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('hypogranular', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('pelger', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('nuclear_atypia', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('dohle_bodies', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['GranulopoiesisFindings'])
        # Adding model 'MegakaryocyteFeatures'
        db.create_table('main_megakaryocytefeatures', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('relative_count', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('hypolobulated', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('fragmented', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('micromegakaryocytes', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['MegakaryocyteFeatures'])
        # Adding model 'IronStain'
        db.create_table('main_ironstain', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('stain_performed', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('iron_content', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('ringed_sideroblasts', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['IronStain'])
        # Adding model 'CellImage'
        db.create_table('main_cellimage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('file', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('celltype', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellType'])),
        ))
        db.send_create_signal('main', ['CellImage'])
        # Adding model 'SimilarLookingGroup'
        db.create_table('main_similarlookinggroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('main', ['SimilarLookingGroup'])
        # Adding M2M table for field cell_image on 'SimilarLookingGroup'
        db.create_table('main_similarlookinggroup_cell_image', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('similarlookinggroup', models.ForeignKey(orm['main.similarlookinggroup'], null=False)),
            ('cellimage', models.ForeignKey(orm['main.cellimage'], null=False))
        ))
        db.create_unique('main_similarlookinggroup_cell_image', ['similarlookinggroup_id', 'cellimage_id'])

    def backwards(self, orm):
        # Drops everything created in forwards(), in reverse dependency order.
        # Deleting model 'CellCountInstance'
        db.delete_table('main_cellcountinstance')
        # Deleting model 'BoneMarrowBackground'
        db.delete_table('main_bonemarrowbackground')
        # Deleting model 'CellType'
        db.delete_table('main_celltype')
        # Deleting model 'CellCount'
        db.delete_table('main_cellcount')
        # Deleting model 'ErythropoiesisFindings'
        db.delete_table('main_erythropoiesisfindings')
        # Deleting model 'GranulopoiesisFindings'
        db.delete_table('main_granulopoiesisfindings')
        # Deleting model 'MegakaryocyteFeatures'
        db.delete_table('main_megakaryocytefeatures')
        # Deleting model 'IronStain'
        db.delete_table('main_ironstain')
        # Deleting model 'CellImage'
        db.delete_table('main_cellimage')
        # Deleting model 'SimilarLookingGroup'
        db.delete_table('main_similarlookinggroup')
        # Removing M2M table for field cell_image on 'SimilarLookingGroup'
        db.delete_table('main_similarlookinggroup_cell_image')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history (auto-generated).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'main.bonemarrowbackground': {
            'Meta': {'object_name': 'BoneMarrowBackground'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'ease_of_aspiration': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'haemodilution': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'particle_cellularity': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'particulate': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'trail_cellularity': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'main.cellcount': {
            'Meta': {'object_name': 'CellCount'},
            'abnormal_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'cell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellType']"}),
            'cell_count_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellCountInstance']"}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'normal_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'main.cellcountinstance': {
            'Meta': {'object_name': 'CellCountInstance'},
            'datetime_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datetime_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'overall_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'tissue_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'main.cellimage': {
            'Meta': {'object_name': 'CellImage'},
            'celltype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellType']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'main.celltype': {
            'Meta': {'object_name': 'CellType'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'machine_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'readable_name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'main.erythropoiesisfindings': {
            'Meta': {'object_name': 'ErythropoiesisFindings'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'megaloblastic_change': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'multinucleated_forms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'no_dysplasia': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'nuclear_asynchrony': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ragged_haemoglobinisation': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.granulopoiesisfindings': {
            'Meta': {'object_name': 'GranulopoiesisFindings'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dohle_bodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hypogranular': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'no_dysplasia': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'nuclear_atypia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pelger': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.ironstain': {
            'Meta': {'object_name': 'IronStain'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iron_content': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'ringed_sideroblasts': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'stain_performed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.megakaryocytefeatures': {
            'Meta': {'object_name': 'MegakaryocyteFeatures'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'fragmented': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hypolobulated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'micromegakaryocytes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'no_dysplasia': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'relative_count': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'main.similarlookinggroup': {
            'Meta': {'object_name': 'SimilarLookingGroup'},
            'cell_image': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.CellImage']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
complete_apps = ['main'] | mit |
makerbot/conveyor | start-dev.py | 1 | 3339 | #! /usr/bin/env python
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
# conveyor/start.py
#
# conveyor - Printing dispatch engine for 3D objects and their friends.
# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''A Python-based startup script for conveyor.'''
from __future__ import (absolute_import, print_function, unicode_literals)
import json
import os
import os.path
import subprocess
import sys
try:
# The argparse module was added to Python as of version 2.7. However, there
# is a backport for older versions of Python and we expect that it is
# installed into the virtualenv.
import argparse
except ImportError:
print(
"conveyor-start: missing required module 'argparse'; is the virtualenv activated?",
file=sys.stderr)
sys.exit(1)
def _main(argv):
    """Parse arguments, sanity-check the environment, and replace this
    process with the conveyor server via ``os.execvp``.

    :param argv: the full argument vector (``sys.argv``); ``argv[0]`` is
        ignored, unknown options are forwarded to the server.
    :returns: an integer exit code on failure paths; on success this
        function never returns because the process image is replaced.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c',
        '--config',
        action='store',
        type=str,
        required=False,
        help='read configuration from FILE',
        metavar='FILE',
        dest='config_file')
    parsed_args, unparsed_args = parser.parse_known_args(argv[1:])
    if None is parsed_args.config_file:
        parsed_args.config_file = 'conveyor-dev.conf'
    try:
        with open(parsed_args.config_file) as fp:
            config = json.load(fp)
    except (EnvironmentError, ValueError):
        # Fall back to the default pid file when the config file is
        # missing/unreadable (EnvironmentError) or not valid JSON
        # (ValueError).  Previously only ValueError was caught, so a
        # missing config file crashed the script with a traceback.
        pid_file = 'conveyord.pid'
    else:
        pid_file = config.get('common', {}).get('pid_file', 'conveyord.pid')
    if os.path.exists(pid_file):
        print(
            'conveyor-start: pid file exists; is the conveyor service already running?',
            file=sys.stderr)
        return 1
    elif 'VIRTUAL_ENV' not in os.environ:
        print('conveyor-start: virtualenv is not activated', file=sys.stderr)
        return 1
    else:
        # Make both the conveyor sources and the sibling s3g checkout
        # importable by the child interpreter.
        path = os.pathsep.join([
            os.path.join('src', 'main', 'python'),
            os.path.join(os.pardir, 's3g'),
        ])
        if 'PYTHONPATH' not in os.environ:
            os.environ['PYTHONPATH'] = path
        else:
            os.environ['PYTHONPATH'] = os.pathsep.join((
                path, os.environ['PYTHONPATH']))
        arguments = [
            'python',
            '-B',
            '-m', 'conveyor.server.__main__',
            '-c', parsed_args.config_file,
        ]
        # A literal '--' separates start-dev options from server options.
        if len(unparsed_args) > 0 and '--' == unparsed_args[0]:
            unparsed_args = unparsed_args[1:]
        arguments.extend(unparsed_args)
        os.execvp(sys.executable, arguments)  # NOTE: this line does not return.
if '__main__' == __name__:
    # Script entry point.  _main returns an int exit code on failure; a
    # None return (only possible if execvp somehow returned) maps to 0.
    code = _main(sys.argv)
    if None is code:
        code = 0
    sys.exit(code)
| agpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/web_infrastructure/_nginx_status_facts.py | 21 | 5143 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nginx_status_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(nginx_status_info) instead.
short_description: Retrieve nginx status facts.
description:
- Gathers facts from nginx from an URL having C(stub_status) enabled.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
url:
description:
- URL of the nginx status.
required: true
timeout:
description:
- HTTP connection timeout in seconds.
required: false
default: 10
notes:
- See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information.
'''
EXAMPLES = '''
# Gather status facts from nginx on localhost
- name: get current http stats
nginx_status_facts:
url: http://localhost/nginx_status
# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
- name: get current http stats
nginx_status_facts:
url: http://localhost/nginx_status
timeout: 20
'''
RETURN = '''
---
nginx_status_facts.active_connections:
description: Active connections.
returned: success
type: int
sample: 2340
nginx_status_facts.accepts:
description: The total number of accepted client connections.
returned: success
type: int
sample: 81769947
nginx_status_facts.handled:
description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
returned: success
type: int
sample: 81769947
nginx_status_facts.requests:
description: The total number of client requests.
returned: success
type: int
sample: 144332345
nginx_status_facts.reading:
description: The current number of connections where nginx is reading the request header.
returned: success
type: int
sample: 0
nginx_status_facts.writing:
description: The current number of connections where nginx is writing the response back to the client.
returned: success
type: int
sample: 241
nginx_status_facts.waiting:
description: The current number of idle client connections waiting for a request.
returned: success
type: int
sample: 2092
nginx_status_facts.data:
description: HTTP response as is.
returned: success
type: str
sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
class NginxStatusFacts(object):
    """Fetch and parse an nginx ``stub_status`` page into a facts dict.

    NOTE(review): reads the module-level ``module`` global (an
    AnsibleModule), which main() must assign before instantiation.
    """
    def __init__(self):
        # url: status page URL; timeout: HTTP timeout in seconds.
        self.url = module.params.get('url')
        self.timeout = module.params.get('timeout')
    def run(self):
        """Return the facts dict; fields stay None when parsing fails.

        Fails the Ansible module outright when the URL yields no
        response; otherwise always stores the raw body under ``data``.
        """
        result = {
            'nginx_status_facts': {
                'active_connections': None,
                'accepts': None,
                'handled': None,
                'requests': None,
                'reading': None,
                'writing': None,
                'waiting': None,
                'data': None,
            }
        }
        (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
        if not response:
            module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
        data = to_text(response.read(), errors='surrogate_or_strict')
        if not data:
            return result
        result['nginx_status_facts']['data'] = data
        # Matches the fixed stub_status layout, e.g.:
        #   "Active connections: 2340 \nserver accepts handled requests\n
        #    81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092"
        expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
               r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
        match = re.match(expr, data, re.S)
        if match:
            result['nginx_status_facts']['active_connections'] = int(match.group(1))
            result['nginx_status_facts']['accepts'] = int(match.group(2))
            result['nginx_status_facts']['handled'] = int(match.group(3))
            result['nginx_status_facts']['requests'] = int(match.group(4))
            result['nginx_status_facts']['reading'] = int(match.group(5))
            result['nginx_status_facts']['writing'] = int(match.group(6))
            result['nginx_status_facts']['waiting'] = int(match.group(7))
        return result
def main():
    """Ansible module entry point: gather nginx status facts and exit."""
    # ``module`` is deliberately global so NginxStatusFacts can read the
    # parsed parameters without having it passed in.
    global module
    module = AnsibleModule(
        argument_spec=dict(
            url=dict(required=True),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True,
    )
    nginx_status_facts = NginxStatusFacts().run()
    # Read-only fact gathering: never reports a change.
    result = dict(changed=False, ansible_facts=nginx_status_facts)
    module.exit_json(**result)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
rven/odoo | odoo/addons/test_populate/models.py | 3 | 3496 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
from odoo.tools import populate, pycompat
class TestPopulateModel(models.Model):
    """Model exercising the demo-data population factory combinators."""
    _name = 'test.populate'
    _description = 'Test Populate'
    name = fields.Char(default='Foo')
    state = fields.Selection([('a', 'A'), ('b', 'B')], default='a')
    active = fields.Boolean('Active', default=True)
    category_id = fields.Many2one('test.populate.category', 'Category')
    some_ref = fields.Integer('Reference')
    dependant_field_1 = fields.Char('Dependant 1')
    dependant_field_2 = fields.Char('Dependant 2')
    sequence = fields.Integer("Sequence")
    # Categories must be populated first so category_id can reference them.
    _populate_dependencies = ['test.populate.category']
    _populate_sizes = {
        'small': 20,
        'medium': 30,
        'large': 100,
    }
    def _populate_factories(self):
        """Return the (field_name, factory) pairs used to generate records."""
        # cross dependant field in a sub generator, cartesian product of two fields
        dependant_factories = [
            ('dependant_field_1', populate.cartesian(['d1_1', 'd1_2'])),
            ('dependant_field_2', populate.cartesian(['d2_1', 'd2_2', 'd2_3_{counter}'])),
        ]
        def generate_dependant(iterator, *args):
            # Merge the cartesian product of the two dependant fields into
            # the main value stream; '__complete' stays True only while both
            # streams are still in their exhaustive (corner-case) phase.
            dependants_generator = populate.chain_factories(dependant_factories, self._name)
            for values in dependants_generator:
                dependant_values = next(iterator)
                yield {**values, **dependant_values, '__complete': values['__complete'] and dependant_values['__complete']}
        def get_name(values=None, counter=0, **kwargs):
            # Encode the record's active flag and generation phase in its name.
            active = 'active' if values['active'] else 'inactive'
            cat = 'filling' if values['__complete'] else 'corner'
            return '%s_%s_%s' % (active, cat, counter)
        # Ids of the category records created by the dependency model above.
        category_ids = self.env.registry.populated_models['test.populate.category']
        return [
            ('active', populate.cartesian([True, False], [3, 1])),
            ('state', populate.cartesian([False] + self.env['test.populate']._fields['state'].get_values(self.env))),
            ('some_ref', populate.iterate([False, 1, 2, 3, 4])),
            ('_dependant', generate_dependant),
            ('name', populate.compute(get_name)),
            ('category_id', populate.randomize([False] + category_ids)),
            ('sequence', populate.randint(1, 10))
        ]
class TestPopulateDependencyModel(models.Model):
    """Dependency model populated before test.populate (see its
    _populate_dependencies)."""
    _name = 'test.populate.category'
    _description = 'Test Populate Category'
    _populate_sizes = {
        'small': 3,
        'medium': 10,
        'large': 20,
    }
    name = fields.Char('Name', required=True, default='Cat1')
    active = fields.Boolean('Active', default=True)
    def _populate_factories(self):
        """Return the (field_name, factory) pairs used to generate records."""
        return [
            # Roughly 9 active records for every inactive one.
            ('active', populate.cartesian([True, False], [9, 1])),
            ('name', populate.cartesian(['Cat1', 'Cat2', 'Cat3'])),
        ]
class TestNoPopulateModelInherit(models.Model):
    """Inheriting model: extends the parent's factories with one more
    (required) field so inherited population keeps working."""
    _name = 'test.populate.inherit'
    _inherit = 'test.populate'
    _description = 'Test populate inherit'
    additionnal_field = fields.Char(required=True)
    def _populate_factories(self):
        # Keep all parent factories and add one for the new required field.
        return super()._populate_factories() + [
            ('additionnal_field', populate.iterate(['V1', 'V2', 'V3'])),
        ]
class TestNoPopulateModel(models.Model):
    """Model with no _populate_factories despite a required field; the
    populate machinery must skip it without crashing."""
    _name = 'test.no.populate'
    _description = 'A model with no populate method and a required field, should not crash'
    name = fields.Char(required=True)
| agpl-3.0 |
NXPmicro/mbed | tools/host_tests/echo.py | 122 | 2080 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import uuid
from sys import stdout
class EchoTest():
    """Host-side serial echo test: writes random UUID strings to the mbed
    serial port and verifies each is echoed back unchanged."""
    # Test parameters
    TEST_SERIAL_BAUDRATE = 115200
    TEST_LOOP_COUNT = 50
    def test(self, selftest):
        """ This host test will use mbed serial port with
            baudrate 115200 to perform echo test on that port.
        """
        # Custom initialization for echo test
        selftest.mbed.init_serial_params(serial_baud=self.TEST_SERIAL_BAUDRATE)
        selftest.mbed.init_serial()
        # Test function, return True or False to get standard test notification on stdout
        selftest.mbed.flush()
        selftest.notify("HOST: Starting the ECHO test")
        result = True
        """ This ensures that there are no parasites left in the serial buffer.
        """
        for i in range(0, 2):
            selftest.mbed.serial_write("\n")
            c = selftest.mbed.serial_readline()
        for i in range(0, self.TEST_LOOP_COUNT):
            # Each round trips one unique newline-terminated UUID string.
            TEST_STRING = str(uuid.uuid4()) + "\n"
            selftest.mbed.serial_write(TEST_STRING)
            c = selftest.mbed.serial_readline()
            if c is None:
                # Read failure/timeout: report a serial I/O error immediately.
                return selftest.RESULT_IO_SERIAL
            if c.strip() != TEST_STRING.strip():
                selftest.notify('HOST: "%s" != "%s"'% (c, TEST_STRING))
                result = False
            else:
                # Progress dot per successful echo.
                sys.stdout.write('.')
                stdout.flush()
        return selftest.RESULT_SUCCESS if result else selftest.RESULT_FAILURE
| apache-2.0 |
maestro-hybrid-cloud/keystone | keystone/tests/unit/test_v3_identity.py | 3 | 27350 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import uuid
import fixtures
import mock
from oslo_config import cfg
from six.moves import http_client
from testtools import matchers
from keystone.common import controller
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import test_v3
CONF = cfg.CONF
class IdentityTestCase(test_v3.RestfulTestCase):
    """Test users and groups."""
    def setUp(self):
        """Create a group and a credential tied to the default test user."""
        super(IdentityTestCase, self).setUp()
        self.group = self.new_group_ref(
            domain_id=self.domain_id)
        self.group = self.identity_api.create_group(self.group)
        self.group_id = self.group['id']
        self.credential_id = uuid.uuid4().hex
        self.credential = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        self.credential['id'] = self.credential_id
        self.credential_api.create_credential(
            self.credential_id,
            self.credential)
    # user crud tests
    def test_create_user(self):
        """Call ``POST /users``."""
        ref = self.new_user_ref(domain_id=self.domain_id)
        r = self.post(
            '/users',
            body={'user': ref})
        return self.assertValidUserResponse(r, ref)
    def test_create_user_without_domain(self):
        """Call ``POST /users`` without specifying domain.

        According to the identity-api specification, if you do not
        explicitly specific the domain_id in the entity, it should
        take the domain scope of the token as the domain_id.

        """
        # Create a user with a role on the domain so we can get a
        # domain scoped token
        domain = self.new_domain_ref()
        self.resource_api.create_domain(domain['id'], domain)
        user = self.new_user_ref(domain_id=domain['id'])
        password = user['password']
        user = self.identity_api.create_user(user)
        user['password'] = password
        self.assignment_api.create_grant(
            role_id=self.role_id, user_id=user['id'],
            domain_id=domain['id'])
        ref = self.new_user_ref(domain_id=domain['id'])
        ref_nd = ref.copy()
        ref_nd.pop('domain_id')
        auth = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            domain_id=domain['id'])
        r = self.post('/users', body={'user': ref_nd}, auth=auth)
        self.assertValidUserResponse(r, ref)
        # Now try the same thing without a domain token - which should fail
        ref = self.new_user_ref(domain_id=domain['id'])
        ref_nd = ref.copy()
        ref_nd.pop('domain_id')
        auth = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        # TODO(henry-nash): Due to bug #1283539 we currently automatically
        # use the default domain_id if a domain scoped token is not being
        # used. For now we just check that a deprecation warning has been
        # issued. Change the code below to expect a failure once this bug is
        # fixed.
        with mock.patch(
                'oslo_log.versionutils.report_deprecated_feature') as mock_dep:
            r = self.post('/users', body={'user': ref_nd}, auth=auth)
            self.assertTrue(mock_dep.called)
        ref['domain_id'] = CONF.identity.default_domain_id
        return self.assertValidUserResponse(r, ref)
    def test_create_user_bad_request(self):
        """Call ``POST /users``."""
        # An empty user entity must be rejected with 400.
        self.post('/users', body={'user': {}},
                  expected_status=http_client.BAD_REQUEST)
    def test_list_users(self):
        """Call ``GET /users``."""
        resource_url = '/users'
        r = self.get(resource_url)
        self.assertValidUserListResponse(r, ref=self.user,
                                         resource_url=resource_url)
    def test_list_users_with_multiple_backends(self):
        """Call ``GET /users`` when multiple backends is enabled.

        In this scenario, the controller requires a domain to be specified
        either as a filter or by using a domain scoped token.

        """
        self.config_fixture.config(group='identity',
                                   domain_specific_drivers_enabled=True)
        # Create a user with a role on the domain so we can get a
        # domain scoped token
        domain = self.new_domain_ref()
        self.resource_api.create_domain(domain['id'], domain)
        user = self.new_user_ref(domain_id=domain['id'])
        password = user['password']
        user = self.identity_api.create_user(user)
        user['password'] = password
        self.assignment_api.create_grant(
            role_id=self.role_id, user_id=user['id'],
            domain_id=domain['id'])
        ref = self.new_user_ref(domain_id=domain['id'])
        ref_nd = ref.copy()
        ref_nd.pop('domain_id')
        auth = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            domain_id=domain['id'])
        # First try using a domain scoped token
        resource_url = '/users'
        r = self.get(resource_url, auth=auth)
        self.assertValidUserListResponse(r, ref=user,
                                         resource_url=resource_url)
        # Now try with an explicit filter
        resource_url = ('/users?domain_id=%(domain_id)s' %
                        {'domain_id': domain['id']})
        r = self.get(resource_url)
        self.assertValidUserListResponse(r, ref=user,
                                         resource_url=resource_url)
        # Now try the same thing without a domain token or filter,
        # which should fail
        r = self.get('/users', expected_status=exception.Unauthorized.code)
    def test_list_users_with_static_admin_token_and_multiple_backends(self):
        # domain-specific operations with the bootstrap ADMIN token is
        # disallowed when domain-specific drivers are enabled
        self.config_fixture.config(group='identity',
                                   domain_specific_drivers_enabled=True)
        self.get('/users', token=CONF.admin_token,
                 expected_status=exception.Unauthorized.code)
    def test_list_users_no_default_project(self):
        """Call ``GET /users`` making sure no default_project_id."""
        user = self.new_user_ref(self.domain_id)
        user = self.identity_api.create_user(user)
        resource_url = '/users'
        r = self.get(resource_url)
        self.assertValidUserListResponse(r, ref=user,
                                         resource_url=resource_url)
    def test_get_user(self):
        """Call ``GET /users/{user_id}``."""
        r = self.get('/users/%(user_id)s' % {
            'user_id': self.user['id']})
        self.assertValidUserResponse(r, self.user)
    def test_get_user_with_default_project(self):
        """Call ``GET /users/{user_id}`` making sure of default_project_id."""
        user = self.new_user_ref(domain_id=self.domain_id,
                                 project_id=self.project_id)
        user = self.identity_api.create_user(user)
        r = self.get('/users/%(user_id)s' % {'user_id': user['id']})
        self.assertValidUserResponse(r, user)
    def test_add_user_to_group(self):
        """Call ``PUT /groups/{group_id}/users/{user_id}``."""
        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user['id']})
    def test_list_groups_for_user(self):
        """Call ``GET /users/{user_id}/groups``."""
        self.user1 = self.new_user_ref(
            domain_id=self.domain['id'])
        password = self.user1['password']
        self.user1 = self.identity_api.create_user(self.user1)
        self.user1['password'] = password
        self.user2 = self.new_user_ref(
            domain_id=self.domain['id'])
        password = self.user2['password']
        self.user2 = self.identity_api.create_user(self.user2)
        self.user2['password'] = password
        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user1['id']})
        # Scenarios below are written to test the default policy configuration
        # One should be allowed to list one's own groups
        auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'])
        resource_url = ('/users/%(user_id)s/groups' %
                        {'user_id': self.user1['id']})
        r = self.get(resource_url, auth=auth)
        self.assertValidGroupListResponse(r, ref=self.group,
                                          resource_url=resource_url)
        # Administrator is allowed to list others' groups
        resource_url = ('/users/%(user_id)s/groups' %
                        {'user_id': self.user1['id']})
        r = self.get(resource_url)
        self.assertValidGroupListResponse(r, ref=self.group,
                                          resource_url=resource_url)
        # Ordinary users should not be allowed to list other's groups
        auth = self.build_authentication_request(
            user_id=self.user2['id'],
            password=self.user2['password'])
        r = self.get('/users/%(user_id)s/groups' % {
            'user_id': self.user1['id']}, auth=auth,
            expected_status=exception.ForbiddenAction.code)
    def test_check_user_in_group(self):
        """Call ``HEAD /groups/{group_id}/users/{user_id}``."""
        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user['id']})
        self.head('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user['id']})
    def test_list_users_in_group(self):
        """Call ``GET /groups/{group_id}/users``."""
        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user['id']})
        resource_url = ('/groups/%(group_id)s/users' %
                        {'group_id': self.group_id})
        r = self.get(resource_url)
        self.assertValidUserListResponse(r, ref=self.user,
                                         resource_url=resource_url)
        self.assertIn('/groups/%(group_id)s/users' % {
            'group_id': self.group_id}, r.result['links']['self'])
    def test_remove_user_from_group(self):
        """Call ``DELETE /groups/{group_id}/users/{user_id}``."""
        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user['id']})
        self.delete('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': self.user['id']})
    def test_update_user(self):
        """Call ``PATCH /users/{user_id}``."""
        user = self.new_user_ref(domain_id=self.domain_id)
        del user['id']
        r = self.patch('/users/%(user_id)s' % {
            'user_id': self.user['id']},
            body={'user': user})
        self.assertValidUserResponse(r, user)
    def test_admin_password_reset(self):
        """An admin password reset must invalidate old passwords and tokens."""
        # bootstrap a user as admin
        user_ref = self.new_user_ref(domain_id=self.domain['id'])
        password = user_ref['password']
        user_ref = self.identity_api.create_user(user_ref)
        # auth as user should work before a password change
        old_password_auth = self.build_authentication_request(
            user_id=user_ref['id'],
            password=password)
        r = self.v3_create_token(old_password_auth)
        old_token = r.headers.get('X-Subject-Token')
        # auth as user with a token should work before a password change
        old_token_auth = self.build_authentication_request(token=old_token)
        self.v3_create_token(old_token_auth)
        # administrative password reset
        new_password = uuid.uuid4().hex
        self.patch('/users/%s' % user_ref['id'],
                   body={'user': {'password': new_password}})
        # auth as user with original password should not work after change
        self.v3_create_token(old_password_auth,
                             expected_status=http_client.UNAUTHORIZED)
        # auth as user with an old token should not work after change
        self.v3_create_token(old_token_auth,
                             expected_status=http_client.NOT_FOUND)
        # new password should work
        new_password_auth = self.build_authentication_request(
            user_id=user_ref['id'],
            password=new_password)
        self.v3_create_token(new_password_auth)
    def test_update_user_domain_id(self):
        """Call ``PATCH /users/{user_id}`` with domain_id."""
        user = self.new_user_ref(domain_id=self.domain['id'])
        user = self.identity_api.create_user(user)
        user['domain_id'] = CONF.identity.default_domain_id
        # Changing domain_id is rejected while domain_id_immutable is set.
        r = self.patch('/users/%(user_id)s' % {
            'user_id': user['id']},
            body={'user': user},
            expected_status=exception.ValidationError.code)
        self.config_fixture.config(domain_id_immutable=False)
        user['domain_id'] = self.domain['id']
        r = self.patch('/users/%(user_id)s' % {
            'user_id': user['id']},
            body={'user': user})
        self.assertValidUserResponse(r, user)
    def test_delete_user(self):
        """Call ``DELETE /users/{user_id}``.

        As well as making sure the delete succeeds, we ensure
        that any credentials that reference this user are
        also deleted, while other credentials are unaffected.
        In addition, no tokens should remain valid for this user.

        """
        # First check the credential for this user is present
        r = self.credential_api.get_credential(self.credential['id'])
        self.assertDictEqual(self.credential, r)
        # Create a second credential with a different user
        self.user2 = self.new_user_ref(
            domain_id=self.domain['id'],
            project_id=self.project['id'])
        self.user2 = self.identity_api.create_user(self.user2)
        self.credential2 = self.new_credential_ref(
            user_id=self.user2['id'],
            project_id=self.project['id'])
        self.credential_api.create_credential(
            self.credential2['id'],
            self.credential2)
        # Create a token for this user which we can check later
        # gets deleted
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        token = self.get_requested_token(auth_data)
        # Confirm token is valid for now
        self.head('/auth/tokens',
                  headers={'X-Subject-Token': token},
                  expected_status=http_client.OK)
        # Now delete the user
        self.delete('/users/%(user_id)s' % {
            'user_id': self.user['id']})
        # Deleting the user should have deleted any credentials
        # that reference this project
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          self.credential['id'])
        # And the no tokens we remain valid
        tokens = self.token_provider_api._persistence._list_tokens(
            self.user['id'])
        self.assertEqual(0, len(tokens))
        # But the credential for user2 is unaffected
        r = self.credential_api.get_credential(self.credential2['id'])
        self.assertDictEqual(self.credential2, r)
    # group crud tests
    def test_create_group(self):
        """Call ``POST /groups``."""
        ref = self.new_group_ref(domain_id=self.domain_id)
        r = self.post(
            '/groups',
            body={'group': ref})
        return self.assertValidGroupResponse(r, ref)
    def test_create_group_bad_request(self):
        """Call ``POST /groups``."""
        # An empty group entity must be rejected with 400.
        self.post('/groups', body={'group': {}},
                  expected_status=http_client.BAD_REQUEST)
    def test_list_groups(self):
        """Call ``GET /groups``."""
        resource_url = '/groups'
        r = self.get(resource_url)
        self.assertValidGroupListResponse(r, ref=self.group,
                                          resource_url=resource_url)
    def test_get_group(self):
        """Call ``GET /groups/{group_id}``."""
        r = self.get('/groups/%(group_id)s' % {
            'group_id': self.group_id})
        self.assertValidGroupResponse(r, self.group)
    def test_update_group(self):
        """Call ``PATCH /groups/{group_id}``."""
        group = self.new_group_ref(domain_id=self.domain_id)
        del group['id']
        r = self.patch('/groups/%(group_id)s' % {
            'group_id': self.group_id},
            body={'group': group})
        self.assertValidGroupResponse(r, group)
    def test_update_group_domain_id(self):
        """Call ``PATCH /groups/{group_id}`` with domain_id."""
        group = self.new_group_ref(domain_id=self.domain['id'])
        group = self.identity_api.create_group(group)
        group['domain_id'] = CONF.identity.default_domain_id
        # Changing domain_id is rejected while domain_id_immutable is set.
        r = self.patch('/groups/%(group_id)s' % {
            'group_id': group['id']},
            body={'group': group},
            expected_status=exception.ValidationError.code)
        self.config_fixture.config(domain_id_immutable=False)
        group['domain_id'] = self.domain['id']
        r = self.patch('/groups/%(group_id)s' % {
            'group_id': group['id']},
            body={'group': group})
        self.assertValidGroupResponse(r, group)
    def test_delete_group(self):
        """Call ``DELETE /groups/{group_id}``."""
        self.delete('/groups/%(group_id)s' % {
            'group_id': self.group_id})
    def test_create_user_password_not_logged(self):
        # When a user is created, the password isn't logged at any level.
        log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        ref = self.new_user_ref(domain_id=self.domain_id)
        self.post(
            '/users',
            body={'user': ref})
        self.assertNotIn(ref['password'], log_fix.output)
    def test_update_password_not_logged(self):
        # When admin modifies user password, the password isn't logged at any
        # level.
        log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        # bootstrap a user as admin
        user_ref = self.new_user_ref(domain_id=self.domain['id'])
        password = user_ref['password']
        user_ref = self.identity_api.create_user(user_ref)
        # administrative password reset
        new_password = uuid.uuid4().hex
        self.patch('/users/%s' % user_ref['id'],
                   body={'user': {'password': new_password}})
        self.assertNotIn(password, log_fix.output)
        self.assertNotIn(new_password, log_fix.output)
class IdentityV3toV2MethodsTestCase(unit.TestCase):
    """Test users V3 to V2 conversion methods."""
    def setUp(self):
        """Build user dicts covering every default_project_id/tenantId combo."""
        super(IdentityV3toV2MethodsTestCase, self).setUp()
        self.load_backends()
        self.user_id = uuid.uuid4().hex
        self.default_project_id = uuid.uuid4().hex
        self.tenant_id = uuid.uuid4().hex
        # User with only default_project_id in ref
        self.user1 = {'id': self.user_id,
                      'name': self.user_id,
                      'default_project_id': self.default_project_id,
                      'domain_id': CONF.identity.default_domain_id}
        # User without default_project_id or tenantId in ref
        self.user2 = {'id': self.user_id,
                      'name': self.user_id,
                      'domain_id': CONF.identity.default_domain_id}
        # User with both tenantId and default_project_id in ref
        self.user3 = {'id': self.user_id,
                      'name': self.user_id,
                      'default_project_id': self.default_project_id,
                      'tenantId': self.tenant_id,
                      'domain_id': CONF.identity.default_domain_id}
        # User with only tenantId in ref
        self.user4 = {'id': self.user_id,
                      'name': self.user_id,
                      'tenantId': self.tenant_id,
                      'domain_id': CONF.identity.default_domain_id}
        # Expected result if the user is meant to have a tenantId element
        self.expected_user = {'id': self.user_id,
                              'name': self.user_id,
                              'username': self.user_id,
                              'tenantId': self.default_project_id}
        # Expected result if the user is not meant to have a tenantId element
        self.expected_user_no_tenant_id = {'id': self.user_id,
                                           'name': self.user_id,
                                           'username': self.user_id}
    def test_v3_to_v2_user_method(self):
        # v3_to_v2_user mutates in place and returns the same dict object.
        updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1)
        self.assertIs(self.user1, updated_user1)
        self.assertDictEqual(self.expected_user, self.user1)
        updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2)
        self.assertIs(self.user2, updated_user2)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user2)
        updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3)
        self.assertIs(self.user3, updated_user3)
        self.assertDictEqual(self.expected_user, self.user3)
        updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4)
        self.assertIs(self.user4, updated_user4)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user4)
    def test_v3_to_v2_user_method_list(self):
        # Passing a list converts each element in place, preserving order.
        user_list = [self.user1, self.user2, self.user3, self.user4]
        updated_list = controller.V2Controller.v3_to_v2_user(user_list)
        self.assertEqual(len(user_list), len(updated_list))
        for i, ref in enumerate(updated_list):
            # Order should not change.
            self.assertIs(ref, user_list[i])
        self.assertDictEqual(self.expected_user, self.user1)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user2)
        self.assertDictEqual(self.expected_user, self.user3)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user4)
class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
    """Tests for the self-service ``POST /users/{user_id}/password`` API."""
    def setUp(self):
        """Create a user and obtain a valid token for it."""
        super(UserSelfServiceChangingPasswordsTestCase, self).setUp()
        self.user_ref = self.new_user_ref(domain_id=self.domain['id'])
        password = self.user_ref['password']
        self.user_ref = self.identity_api.create_user(self.user_ref)
        self.user_ref['password'] = password
        self.token = self.get_request_token(self.user_ref['password'],
                                            http_client.CREATED)
    def get_request_token(self, password, expected_status):
        """Authenticate with *password* and return the issued token id."""
        auth_data = self.build_authentication_request(
            user_id=self.user_ref['id'],
            password=password)
        r = self.v3_create_token(auth_data,
                                 expected_status=expected_status)
        return r.headers.get('X-Subject-Token')
    def change_password(self, expected_status, **kwargs):
        """Returns a test response for a change password request."""
        return self.post('/users/%s/password' % self.user_ref['id'],
                         body={'user': kwargs},
                         token=self.token,
                         expected_status=expected_status)
    def test_changing_password(self):
        # original password works
        token_id = self.get_request_token(self.user_ref['password'],
                                          expected_status=http_client.CREATED)
        # original token works
        old_token_auth = self.build_authentication_request(token=token_id)
        self.v3_create_token(old_token_auth)
        # change password
        new_password = uuid.uuid4().hex
        self.change_password(password=new_password,
                             original_password=self.user_ref['password'],
                             expected_status=http_client.NO_CONTENT)
        # old password fails
        self.get_request_token(self.user_ref['password'],
                               expected_status=http_client.UNAUTHORIZED)
        # old token fails
        self.v3_create_token(old_token_auth,
                             expected_status=http_client.NOT_FOUND)
        # new password works
        self.get_request_token(new_password,
                               expected_status=http_client.CREATED)
    def test_changing_password_with_missing_original_password_fails(self):
        r = self.change_password(password=uuid.uuid4().hex,
                                 expected_status=http_client.BAD_REQUEST)
        self.assertThat(r.result['error']['message'],
                        matchers.Contains('original_password'))
    def test_changing_password_with_missing_password_fails(self):
        r = self.change_password(original_password=self.user_ref['password'],
                                 expected_status=http_client.BAD_REQUEST)
        self.assertThat(r.result['error']['message'],
                        matchers.Contains('password'))
    def test_changing_password_with_incorrect_password_fails(self):
        self.change_password(password=uuid.uuid4().hex,
                             original_password=uuid.uuid4().hex,
                             expected_status=http_client.UNAUTHORIZED)
    def test_changing_password_with_disabled_user_fails(self):
        # disable the user account
        self.user_ref['enabled'] = False
        self.patch('/users/%s' % self.user_ref['id'],
                   body={'user': self.user_ref})
        self.change_password(password=uuid.uuid4().hex,
                             original_password=self.user_ref['password'],
                             expected_status=http_client.UNAUTHORIZED)
    def test_changing_password_not_logged(self):
        # When a user changes their password, the password isn't logged at any
        # level.
        log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        # change password
        new_password = uuid.uuid4().hex
        self.change_password(password=new_password,
                             original_password=self.user_ref['password'],
                             expected_status=http_client.NO_CONTENT)
        self.assertNotIn(self.user_ref['password'], log_fix.output)
        self.assertNotIn(new_password, log_fix.output)
| apache-2.0 |
m4ns0ur/grumpy | third_party/pythonparser/lexer.py | 6 | 25013 | """
The :mod:`lexer` module concerns itself with tokenizing Python source.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from . import source, diagnostic
import re
import unicodedata
import sys
# Python 2/3 compatibility shims.  On Python 3, str is already unicode so
# chr() covers the old unichr() role, and a single byte must be built via
# bytes([...]); on Python 2, chr() already returns a one-byte str.
if sys.version_info[0] == 3:
    unichr = chr

    def byte(x):
        """Return a one-byte bytes object for the integer *x* (0-255)."""
        return bytes([x])
else:
    byte = chr
class Token:
    """
    A single lexer token together with its location in the source code.

    :ivar loc: (:class:`pythonparser.source.Range`) token location
    :ivar kind: (string) token kind
    :ivar value: token value; None or a kind-specific class
    """

    def __init__(self, loc, kind, value=None):
        self.loc = loc
        self.kind = kind
        self.value = value

    def __repr__(self):
        # Location and value are rendered with repr(); kind is inserted bare.
        return "Token(%s, \"%s\", %s)" % (repr(self.loc), self.kind, repr(self.value))
class Lexer:
    """
    The :class:`Lexer` class extracts tokens and comments from
    a :class:`pythonparser.source.Buffer`.

    :class:`Lexer` is an iterable.

    :ivar version: (tuple of (*major*, *minor*))
        the version of Python, determining the grammar used
    :ivar source_buffer: (:class:`pythonparser.source.Buffer`)
        the source buffer
    :ivar diagnostic_engine: (:class:`pythonparser.diagnostic.Engine`)
        the diagnostic engine
    :ivar offset: (integer) character offset into ``source_buffer``
        indicating where the next token will be recognized
    :ivar interactive: (boolean) whether a completely empty line
        should generate a NEWLINE token, for use in REPLs
    """

    # Keyword and operator sets per Python version; the later versions are
    # expressed as deltas against this 2.6 baseline.
    _reserved_2_6 = frozenset([
        "!=", "%", "%=", "&", "&=", "(", ")", "*", "**", "**=", "*=", "+", "+=",
        ",", "-", "-=", ".", "/", "//", "//=", "/=", ":", ";", "<", "<<", "<<=",
        "<=", "<>", "=", "==", ">", ">=", ">>", ">>=", "@", "[", "]", "^", "^=", "`",
        "and", "as", "assert", "break", "class", "continue", "def", "del", "elif",
        "else", "except", "exec", "finally", "for", "from", "global", "if", "import",
        "in", "is", "lambda", "not", "or", "pass", "print", "raise", "return", "try",
        "while", "with", "yield", "{", "|", "|=", "}", "~"
    ])

    _reserved_3_0 = _reserved_2_6 \
        - set(["<>", "`", "exec", "print"]) \
        | set(["->", "...", "False", "None", "nonlocal", "True"])

    _reserved_3_1 = _reserved_3_0 \
        | set(["<>"])

    _reserved_3_5 = _reserved_3_1 \
        | set(["@", "@="])

    _reserved = {
        (2, 6): _reserved_2_6,
        (2, 7): _reserved_2_6,
        (3, 0): _reserved_3_0,
        (3, 1): _reserved_3_1,
        (3, 2): _reserved_3_1,
        (3, 3): _reserved_3_1,
        (3, 4): _reserved_3_1,
        (3, 5): _reserved_3_5,
    }
    """
    A map from a tuple (*major*, *minor*) corresponding to Python version to
    :class:`frozenset`\s of keywords.
    """

    _string_prefixes_3_1 = frozenset(["", "r", "b", "br"])
    _string_prefixes_3_3 = frozenset(["", "r", "u", "b", "br", "rb"])

    # holy mother of god why
    _string_prefixes = {
        (2, 6): frozenset(["", "r", "u", "ur"]),
        (2, 7): frozenset(["", "r", "u", "ur", "b", "br"]),
        (3, 0): frozenset(["", "r", "b"]),
        (3, 1): _string_prefixes_3_1,
        (3, 2): _string_prefixes_3_1,
        (3, 3): _string_prefixes_3_3,
        (3, 4): _string_prefixes_3_3,
        (3, 5): _string_prefixes_3_3,
    }
    """
    A map from a tuple (*major*, *minor*) corresponding to Python version to
    :class:`frozenset`\s of string prefixes.
    """

    def __init__(self, source_buffer, version, diagnostic_engine, interactive=False):
        self.source_buffer = source_buffer
        self.version = version
        self.diagnostic_engine = diagnostic_engine
        self.interactive = interactive
        self.print_function = False
        self.unicode_literals = self.version >= (3, 0)

        # Lexer state: scan position, line/indent tracking, pending tokens
        # (queue), collected comments, and open bracket stacks used for
        # implicit line joining and delimiter-match diagnostics.
        self.offset = 0
        self.new_line = True
        self.indent = [(0, source.Range(source_buffer, 0, 0), "")]
        self.comments = []
        self.queue = []
        self.parentheses = []
        self.curly_braces = []
        self.square_braces = []

        try:
            reserved = self._reserved[version]
        except KeyError:
            raise NotImplementedError("pythonparser.lexer.Lexer cannot lex Python %s" % str(version))

        # Sort for the regexp to obey longest-match rule.
        re_reserved = sorted(reserved, reverse=True, key=len)
        re_keywords = "|".join([kw for kw in re_reserved if kw.isalnum()])
        re_operators = "|".join([re.escape(op) for op in re_reserved if not op.isalnum()])

        # Python 3.0 uses ID_Start, >3.0 uses XID_Start
        if self.version == (3, 0):
            id_xid = ""
        else:
            id_xid = "X"

        # To speed things up on CPython, we use the re module to generate a DFA
        # from our token set and execute it in C. Every result yielded by
        # iterating this regular expression has exactly one non-empty group
        # that would correspond to a e.g. lex scanner branch.
        # The only thing left to Python code is then to select one from this
        # small set of groups, which is much faster than dissecting the strings.
        #
        # A lexer has to obey longest-match rule, but a regular expression does not.
        # Therefore, the cases in it are carefully sorted so that the longest
        # ones come up first. The exception is the identifier case, which would
        # otherwise grab all keywords; it is made to work by making it impossible
        # for the keyword case to match a word prefix, and ordering it before
        # the identifier case.
        #
        # NOTE(review): the \p{...} property classes below are not supported by
        # the stdlib "re" module even though this file imports "re" --
        # presumably this is run in an environment where "re" resolves to an
        # implementation that supports them; confirm before porting.
        self._lex_token_re = re.compile(r"""
        [ \t\f]* # initial whitespace
        ( # 1
            (\\)? # ?2 line continuation
            ([\n]|[\r][\n]|[\r]) # 3 newline
        |   (\#.*) # 4 comment
        |   ( # 5 floating point or complex literal
                (?: [0-9]* \.  [0-9]+
                |   [0-9]+ \.?
                ) [eE] [+-]? [0-9]+
            |   [0-9]* \. [0-9]+
            |   [0-9]+ \.
            ) ([jJ])? # ?6 complex suffix
        |   ([0-9]+) [jJ] # 7 complex literal
        |   (?: # integer literal
                ( [1-9]   [0-9]* )       # 8 dec
            |     0[oO] ( [0-7]+ )       # 9 oct
            |     0[xX] ( [0-9A-Fa-f]+ ) # 10 hex
            |     0[bB] ( [01]+ )        # 11 bin
            |   ( [0-9]   [0-9]* )       # 12 bare oct
            )
            ([Ll])?                      # 13 long option
        |   ([BbUu]?[Rr]?) # ?14 string literal options
            (?: # string literal start
                # 15, 16, 17 long string
                (""\"|''') ((?: \\?[\n] | \\. | . )*?) (\15)
                # 18, 19, 20 short string
            |   ("   |'   ) ((?: \\ [\n] | \\. | . )*?) (\18)
                # 21 unterminated
            |   (""\"|'''|"|')
            )
        |   ((?:{keywords})\b|{operators}) # 22 keywords and operators
        |   ([A-Za-z_][A-Za-z0-9_]*\b) # 23 identifier
        |   (\p{{{id_xid}ID_Start}}\p{{{id_xid}ID_Continue}}*) # 24 Unicode identifier
        |   ($) # 25 end-of-file
        )
        """.format(keywords=re_keywords, operators=re_operators,
                   id_xid=id_xid), re.VERBOSE|re.UNICODE)

    # These are identical for all lexer instances.
    _lex_escape_pattern = r"""
    \\(?:
        ([\n\\'"abfnrtv]) # 1 single-char
    |   ([0-7]{1,3})      # 2 oct
    |   x([0-9A-Fa-f]{2}) # 3 hex
    )
    """
    _lex_escape_re = re.compile(_lex_escape_pattern.encode(), re.VERBOSE)

    # Unicode strings additionally support \uXXXX, \UXXXXXXXX and \N{NAME}.
    _lex_escape_unicode_re = re.compile(_lex_escape_pattern + r"""
    | \\(?:
        u([0-9A-Fa-f]{4}) # 4 unicode-16
    |   U([0-9A-Fa-f]{8}) # 5 unicode-32
    |   N\{(.+?)\}        # 6 unicode-name
    )
    """, re.VERBOSE)

    def next(self, eof_token=False):
        """
        Returns token at ``offset`` as a :class:`Token` and advances ``offset``
        to point past the end of the token, where the token has:

        - *range* which is a :class:`pythonparser.source.Range` that includes
          the token but not surrounding whitespace,
        - *kind* which is a string containing one of Python keywords or operators,
          ``newline``, ``float``, ``int``, ``complex``, ``strbegin``,
          ``strdata``, ``strend``, ``ident``, ``indent``, ``dedent`` or ``eof``
          (if ``eof_token`` is True).
        - *value* which is the flags as lowercase string if *kind* is ``strbegin``,
          the string contents if *kind* is ``strdata``,
          the numeric value if *kind* is ``float``, ``int`` or ``complex``,
          the identifier if *kind* is ``ident`` and ``None`` in any other case.

        :param eof_token: if true, will return a token with kind ``eof``
            when the input is exhausted; if false, will raise ``StopIteration``.
        """
        if len(self.queue) == 0:
            self._refill(eof_token)

        return self.queue.pop(0)

    def peek(self, eof_token=False):
        """Same as :meth:`next`, except the token is not dequeued."""
        if len(self.queue) == 0:
            self._refill(eof_token)

        # NOTE(review): this returns the *last* queued token while next()
        # pops the *first*; the two disagree whenever _refill() queued more
        # than one token (e.g. indent/dedent runs) -- confirm intended.
        return self.queue[-1]

    # We need separate next and _refill because lexing can sometimes
    # generate several tokens, e.g. INDENT
    def _refill(self, eof_token):
        # End of input: emit a final newline if needed, then unwind the
        # indentation stack as dedents, then eof (or StopIteration).
        if self.offset == len(self.source_buffer.source):
            range = source.Range(self.source_buffer, self.offset, self.offset)

            if not self.new_line:
                self.new_line = True
                self.queue.append(Token(range, "newline"))
                return

            for i in self.indent[1:]:
                self.indent.pop(-1)
                self.queue.append(Token(range, "dedent"))

            if eof_token:
                self.queue.append(Token(range, "eof"))
            elif len(self.queue) == 0:
                raise StopIteration

            return

        match = self._lex_token_re.match(self.source_buffer.source, self.offset)
        if match is None:
            diag = diagnostic.Diagnostic(
                "fatal", "unexpected {character}",
                {"character": repr(self.source_buffer.source[self.offset]).lstrip("u")},
                source.Range(self.source_buffer, self.offset, self.offset + 1))
            self.diagnostic_engine.process(diag)

        # Should we emit indent/dedent?
        if self.new_line and \
                match.group(3) is None and \
                match.group(4) is None: # not a blank line
            whitespace = match.string[match.start(0):match.start(1)]
            level = len(whitespace.expandtabs())
            range = source.Range(self.source_buffer, match.start(1), match.start(1))
            if level > self.indent[-1][0]:
                self.indent.append((level, range, whitespace))
                self.queue.append(Token(range, "indent"))
            elif level < self.indent[-1][0]:
                exact = False
                while level <= self.indent[-1][0]:
                    if level == self.indent[-1][0] or self.indent[-1][0] == 0:
                        exact = True
                        break
                    self.indent.pop(-1)
                    self.queue.append(Token(range, "dedent"))
                if not exact:
                    note = diagnostic.Diagnostic(
                        "note", "expected to match level here", {},
                        self.indent[-1][1])
                    error = diagnostic.Diagnostic(
                        "fatal", "inconsistent indentation", {},
                        range, notes=[note])
                    self.diagnostic_engine.process(error)
            elif whitespace != self.indent[-1][2] and self.version >= (3, 0):
                error = diagnostic.Diagnostic(
                    "error", "inconsistent use of tabs and spaces in indentation", {},
                    range)
                self.diagnostic_engine.process(error)

        # Prepare for next token.
        self.offset = match.end(0)

        tok_range = source.Range(self.source_buffer, *match.span(1))
        if match.group(3) is not None: # newline
            if len(self.parentheses) + len(self.square_braces) + len(self.curly_braces) > 0:
                # 2.1.6 Implicit line joining
                return self._refill(eof_token)
            if match.group(2) is not None:
                # 2.1.5. Explicit line joining
                return self._refill(eof_token)
            if self.new_line and not \
                    (self.interactive and match.group(0) == match.group(3)): # REPL terminator
                # 2.1.7. Blank lines
                return self._refill(eof_token)

            self.new_line = True
            self.queue.append(Token(tok_range, "newline"))
            return

        if match.group(4) is not None: # comment
            self.comments.append(source.Comment(tok_range, match.group(4)))
            return self._refill(eof_token)

        # Lexing non-whitespace now.
        self.new_line = False

        # On Python 2 hosts, literals with an L suffix become `long`.
        if sys.version_info > (3,) or not match.group(13):
            int_type = int
        else:
            int_type = long

        # Exactly one of the numbered regexp groups matched; dispatch on it.
        if match.group(5) is not None: # floating point or complex literal
            if match.group(6) is None:
                self.queue.append(Token(tok_range, "float", float(match.group(5))))
            else:
                self.queue.append(Token(tok_range, "complex", float(match.group(5)) * 1j))
        elif match.group(7) is not None: # complex literal
            self.queue.append(Token(tok_range, "complex", int(match.group(7)) * 1j))
        elif match.group(8) is not None: # integer literal, dec
            literal = match.group(8)
            self._check_long_literal(tok_range, match.group(1))
            self.queue.append(Token(tok_range, "int", int_type(literal)))
        elif match.group(9) is not None: # integer literal, oct
            literal = match.group(9)
            self._check_long_literal(tok_range, match.group(1))
            self.queue.append(Token(tok_range, "int", int_type(literal, 8)))
        elif match.group(10) is not None: # integer literal, hex
            literal = match.group(10)
            self._check_long_literal(tok_range, match.group(1))
            self.queue.append(Token(tok_range, "int", int_type(literal, 16)))
        elif match.group(11) is not None: # integer literal, bin
            literal = match.group(11)
            self._check_long_literal(tok_range, match.group(1))
            self.queue.append(Token(tok_range, "int", int_type(literal, 2)))
        elif match.group(12) is not None: # integer literal, bare oct
            literal = match.group(12)
            if len(literal) > 1 and self.version >= (3, 0):
                error = diagnostic.Diagnostic(
                    "error", "in Python 3, decimal literals must not start with a zero", {},
                    source.Range(self.source_buffer, tok_range.begin_pos, tok_range.begin_pos + 1))
                self.diagnostic_engine.process(error)
            self.queue.append(Token(tok_range, "int", int(literal, 8)))
        elif match.group(15) is not None: # long string literal
            self._string_literal(
                options=match.group(14), begin_span=(match.start(14), match.end(15)),
                data=match.group(16), data_span=match.span(16),
                end_span=match.span(17))
        elif match.group(18) is not None: # short string literal
            self._string_literal(
                options=match.group(14), begin_span=(match.start(14), match.end(18)),
                data=match.group(19), data_span=match.span(19),
                end_span=match.span(20))
        elif match.group(21) is not None: # unterminated string
            error = diagnostic.Diagnostic(
                "fatal", "unterminated string", {},
                tok_range)
            self.diagnostic_engine.process(error)
        elif match.group(22) is not None: # keywords and operators
            kwop = match.group(22)
            self._match_pair_delim(tok_range, kwop)
            # With print_function active, "print" lexes as an identifier.
            if kwop == "print" and self.print_function:
                self.queue.append(Token(tok_range, "ident", "print"))
            else:
                self.queue.append(Token(tok_range, kwop))
        elif match.group(23) is not None: # identifier
            self.queue.append(Token(tok_range, "ident", match.group(23)))
        elif match.group(24) is not None: # Unicode identifier
            if self.version < (3, 0):
                error = diagnostic.Diagnostic(
                    "error", "in Python 2, Unicode identifiers are not allowed", {},
                    tok_range)
                self.diagnostic_engine.process(error)
            self.queue.append(Token(tok_range, "ident", match.group(24)))
        elif match.group(25) is not None: # end-of-file
            # Reuse the EOF logic
            return self._refill(eof_token)
        else:
            assert False

    def _string_literal(self, options, begin_span, data, data_span, end_span):
        """Queue the strbegin/strdata/strend token triple for a string literal."""
        options = options.lower()
        begin_range = source.Range(self.source_buffer, *begin_span)
        data_range = source.Range(self.source_buffer, *data_span)

        if options not in self._string_prefixes[self.version]:
            error = diagnostic.Diagnostic(
                "error", "string prefix '{prefix}' is not available in Python {major}.{minor}",
                {"prefix": options, "major": self.version[0], "minor": self.version[1]},
                begin_range)
            self.diagnostic_engine.process(error)

        self.queue.append(Token(begin_range, "strbegin", options))
        self.queue.append(Token(data_range,
                          "strdata", self._replace_escape(data_range, options, data)))
        self.queue.append(Token(source.Range(self.source_buffer, *end_span),
                          "strend"))

    def _replace_escape(self, range, mode, value):
        """Decode the body of a string literal according to its prefix flags."""
        is_raw = ("r" in mode)
        # A literal is unicode if it carries a "u" prefix, or has no "b"
        # prefix while unicode_literals is in effect (Python 3 semantics).
        is_unicode = "u" in mode or ("b" not in mode and self.unicode_literals)

        if not is_unicode:
            value = value.encode(self.source_buffer.encoding)
            if is_raw:
                return value
            return self._replace_escape_bytes(value)

        if is_raw:
            return value
        return self._replace_escape_unicode(range, value)

    def _replace_escape_unicode(self, range, value):
        """Expand backslash escapes in a unicode string body."""
        chunks = []
        offset = 0
        while offset < len(value):
            match = self._lex_escape_unicode_re.search(value, offset)
            if match is None:
                # Append the remaining of the string
                chunks.append(value[offset:])
                break

            # Append the part of string before match
            chunks.append(value[offset:match.start()])
            offset = match.end()

            # Process the escape
            if match.group(1) is not None: # single-char
                # (shadows the builtin chr; kept as-is)
                chr = match.group(1)
                if chr == "\n":
                    # backslash-newline: line continuation, produces nothing
                    pass
                elif chr == "\\" or chr == "'" or chr == "\"":
                    chunks.append(chr)
                elif chr == "a":
                    chunks.append("\a")
                elif chr == "b":
                    chunks.append("\b")
                elif chr == "f":
                    chunks.append("\f")
                elif chr == "n":
                    chunks.append("\n")
                elif chr == "r":
                    chunks.append("\r")
                elif chr == "t":
                    chunks.append("\t")
                elif chr == "v":
                    chunks.append("\v")
            elif match.group(2) is not None: # oct
                chunks.append(unichr(int(match.group(2), 8)))
            elif match.group(3) is not None: # hex
                chunks.append(unichr(int(match.group(3), 16)))
            elif match.group(4) is not None: # unicode-16
                chunks.append(unichr(int(match.group(4), 16)))
            elif match.group(5) is not None: # unicode-32
                try:
                    chunks.append(unichr(int(match.group(5), 16)))
                except ValueError:
                    error = diagnostic.Diagnostic(
                        "error", "unicode character out of range", {},
                        source.Range(self.source_buffer,
                                     range.begin_pos + match.start(0),
                                     range.begin_pos + match.end(0)))
                    self.diagnostic_engine.process(error)
            elif match.group(6) is not None: # unicode-name
                try:
                    chunks.append(unicodedata.lookup(match.group(6)))
                except KeyError:
                    error = diagnostic.Diagnostic(
                        "error", "unknown unicode character name", {},
                        source.Range(self.source_buffer,
                                     range.begin_pos + match.start(0),
                                     range.begin_pos + match.end(0)))
                    self.diagnostic_engine.process(error)

        return "".join(chunks)

    def _replace_escape_bytes(self, value):
        """Expand backslash escapes in a byte string body."""
        chunks = []
        offset = 0
        while offset < len(value):
            match = self._lex_escape_re.search(value, offset)
            if match is None:
                # Append the remaining of the string
                chunks.append(value[offset:])
                break

            # Append the part of string before match
            chunks.append(value[offset:match.start()])
            offset = match.end()

            # Process the escape
            if match.group(1) is not None: # single-char
                # (shadows the builtin chr; kept as-is)
                chr = match.group(1)
                if chr == b"\n":
                    pass
                elif chr == b"\\" or chr == b"'" or chr == b"\"":
                    chunks.append(chr)
                elif chr == b"a":
                    chunks.append(b"\a")
                elif chr == b"b":
                    chunks.append(b"\b")
                elif chr == b"f":
                    chunks.append(b"\f")
                elif chr == b"n":
                    chunks.append(b"\n")
                elif chr == b"r":
                    chunks.append(b"\r")
                elif chr == b"t":
                    chunks.append(b"\t")
                elif chr == b"v":
                    chunks.append(b"\v")
            elif match.group(2) is not None: # oct
                chunks.append(byte(int(match.group(2), 8)))
            elif match.group(3) is not None: # hex
                chunks.append(byte(int(match.group(3), 16)))

        return b"".join(chunks)

    def _check_long_literal(self, range, literal):
        """Diagnose an L/l suffix on an integer literal when targeting Python 3."""
        if literal[-1] in "lL" and self.version >= (3, 0):
            error = diagnostic.Diagnostic(
                "error", "in Python 3, long integer literals were removed", {},
                source.Range(self.source_buffer, range.end_pos - 1, range.end_pos))
            self.diagnostic_engine.process(error)

    def _match_pair_delim(self, range, kwop):
        """Track open/close bracket tokens and diagnose mismatched closers."""
        if kwop == "(":
            self.parentheses.append(range)
        elif kwop == "[":
            self.square_braces.append(range)
        elif kwop == "{":
            self.curly_braces.append(range)
        elif kwop == ")":
            self._check_innermost_pair_delim(range, "(")
            self.parentheses.pop()
        elif kwop == "]":
            self._check_innermost_pair_delim(range, "[")
            self.square_braces.pop()
        elif kwop == "}":
            self._check_innermost_pair_delim(range, "{")
            self.curly_braces.pop()

    def _check_innermost_pair_delim(self, range, expected):
        """Verify that the most recently opened bracket matches *expected*."""
        ranges = []
        if len(self.parentheses) > 0:
            ranges.append(("(", self.parentheses[-1]))
        if len(self.square_braces) > 0:
            ranges.append(("[", self.square_braces[-1]))
        if len(self.curly_braces) > 0:
            ranges.append(("{", self.curly_braces[-1]))

        # The innermost open delimiter is the one opened last in the source.
        ranges.sort(key=lambda k: k[1].begin_pos)
        if any(ranges):
            compl_kind, compl_range = ranges[-1]
            if compl_kind != expected:
                note = diagnostic.Diagnostic(
                    "note", "'{delimiter}' opened here",
                    {"delimiter": compl_kind},
                    compl_range)
                error = diagnostic.Diagnostic(
                    "fatal", "mismatched '{delimiter}'",
                    {"delimiter": range.source()},
                    range, notes=[note])
                self.diagnostic_engine.process(error)
        else:
            # Closing delimiter with nothing open at all.
            error = diagnostic.Diagnostic(
                "fatal", "mismatched '{delimiter}'",
                {"delimiter": range.source()},
                range)
            self.diagnostic_engine.process(error)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()
| apache-2.0 |
pbassut/pagarme-python | pagarme/transaction.py | 3 | 3904 | # encoding: utf-8
import json
import requests
from .exceptions import PagarmeApiError, NotPaidException, NotBoundException
from .resource import AbstractResource
class Transaction(AbstractResource):
    """A Pagar.me payment transaction.

    Wraps the ``/1/transactions`` REST endpoints: creation (:meth:`charge`),
    :meth:`capture`, :meth:`refund` and lookup (:meth:`find_by_id`).
    Non-200 API responses are raised as :class:`PagarmeApiError`.
    """

    BASE_URL = 'https://api.pagar.me/1/transactions'

    def __init__(
            self,
            api_key=None,
            amount=None,
            card_hash=None,
            card_id=None,
            payment_method='credit_card',
            installments=1,
            postback_url=None,
            metadata=None,
            soft_descriptor='',
            customer=None,
            **kwargs):
        """Build a transaction payload.

        Extra keyword arguments are stored verbatim and sent in the request
        body.  *metadata* is an optional dict of free-form data; when
        omitted, each instance gets its own fresh dict.  *soft_descriptor*
        is truncated to 13 characters (the Pagar.me limit enforced below).
        """
        self.amount = amount
        self.api_key = api_key
        self.card_hash = card_hash
        self.card_id = card_id
        self.payment_method = payment_method
        self.installments = installments
        self.postback_url = postback_url
        # Use None as the default instead of a mutable {} literal so the
        # dict is never shared between instances (mutable-default pitfall).
        self.metadata = metadata if metadata is not None else {}
        self.soft_descriptor = soft_descriptor[:13]
        self.id = None
        self.data = {}
        self.customer = customer
        for key, value in kwargs.items():
            self.data[key] = value

    def error(self, response):
        """Raise :class:`PagarmeApiError` built from the first API error."""
        data = json.loads(response)
        e = data['errors'][0]
        error_string = e['type'] + ' - ' + e['message']
        raise PagarmeApiError(error_string)

    def _process_response(self, pagarme_response):
        # Shared response dispatch: a 200 loads the returned transaction
        # state onto this instance, anything else raises via error().
        if pagarme_response.status_code == 200:
            self.handle_response(json.loads(pagarme_response.content))
        else:
            self.error(pagarme_response.content)

    def charge(self):
        """Create the transaction on Pagar.me."""
        post_data = self.get_data()
        url = self.BASE_URL
        self._process_response(requests.post(url, data=post_data))

    def handle_response(self, data):
        """Copy the API response fields onto this instance."""
        self.id = data['id']
        self.status = data['status']
        self.card = data['card']
        self.postback_url = data['postback_url']
        self.metadata = data['metadata']
        self.data = data

    def capture(self):
        """Capture a previously authorized transaction.

        :raises NotBoundException: if the transaction has no id yet.
        """
        if self.id is None:
            raise NotBoundException('First try search your transaction')
        # Bug fix: the endpoint was misspelled '/caputre', so every capture
        # request hit a nonexistent URL.
        url = self.BASE_URL + '/' + str(self.id) + '/capture'
        data = {'api_key': self.api_key}
        self._process_response(requests.post(url, data=data))

    def get_data(self):
        """Return the POST payload dict for this transaction."""
        return self.__dict__()

    def __dict__(self):
        # NOTE(review): defining a *method* named __dict__ shadows the
        # instance-dict descriptor (e.g. vars(obj) returns this bound
        # method instead of the attribute dict); kept unchanged for
        # backward compatibility with existing callers.
        d = self.data
        d['api_key'] = self.api_key
        if self.amount:
            d['amount'] = self.amount
        d['card_hash'] = self.card_hash
        d['card_id'] = self.card_id
        d['installments'] = self.installments
        d['payment_method'] = self.payment_method
        d['soft_descriptor'] = self.soft_descriptor[:13]
        if self.metadata:
            # Pagar.me expects metadata flattened as metadata[key] form fields.
            for key, value in self.metadata.items():
                new_key = 'metadata[{key}]'.format(key=key)
                d[new_key] = value
        if self.postback_url:
            d['postback_url'] = self.postback_url
        if self.customer:
            d.update(self.customer.get_anti_fraud_data())
        return d

    def find_by_id(self, id=None):
        """Fetch the transaction identified by *id* onto this instance."""
        url = self.BASE_URL + '/' + str(id)
        self._process_response(requests.get(url, data=self.get_data()))

    def refund(self):
        """Refund this transaction.

        :raises NotPaidException: if the transaction has no id yet.
        """
        if self.id is None:
            raise NotPaidException('Id not suplied')
        url = self.BASE_URL + '/' + str(self.id) + '/refund'
        self._process_response(requests.post(url, data=self.get_data()))
| mit |
codoranro/ag-openwrt-15.05 | scripts/dl_cleanup.py | 177 | 5942 | #!/usr/bin/env python
"""
# OpenWrt download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (C) 2010 Michael Buesch <mb@bu3sch.de>
# Copyright (C) 2013 OpenWrt.org
"""
import sys
import os
import re
import getopt
# Commandline options
# Global dry-run flag: when True, deletions are only reported, not performed.
opt_dryrun = False
def parseVer_1234(match, filepath):
	"""Build a sortable (name, version) pair from an a.b.c.d version match."""
	name = match.group(1)
	version = 0
	# Pack each numeric component into its own 16-bit-aligned slot so plain
	# integer comparison orders versions correctly.
	for group_index, shift in ((2, 64), (3, 48), (4, 32), (5, 16)):
		version |= int(match.group(group_index)) << shift
	return (name, version)
def parseVer_123(match, filepath):
	"""Parse an a.b.c version (optional trailing letter patchlevel, e.g. 1.2.3a)
	into a (name, sortable-integer-version) pair."""
	progname = match.group(1)
	try:
		patchlevel = match.group(5)
	# Python 2 except syntax (this script targets Python 2).  re raises
	# IndexError when the pattern has no group 5.
	except (IndexError), e:
		patchlevel = None
	if patchlevel:
		# Letter patchlevel sorts by its character code in the low bits.
		patchlevel = ord(patchlevel[0])
	else:
		patchlevel = 0
	progversion = (int(match.group(2)) << 64) |\
		      (int(match.group(3)) << 48) |\
		      (int(match.group(4)) << 32) |\
		      patchlevel
	return (progname, progversion)
def parseVer_12(match, filepath):
	"""Parse an a.b version (optional trailing letter patchlevel, e.g. 1.2a)
	into a (name, sortable-integer-version) pair."""
	progname = match.group(1)
	try:
		patchlevel = match.group(4)
	# Python 2 except syntax; IndexError means the pattern has no group 4.
	except (IndexError), e:
		patchlevel = None
	if patchlevel:
		patchlevel = ord(patchlevel[0])
	else:
		patchlevel = 0
	progversion = (int(match.group(2)) << 64) |\
		      (int(match.group(3)) << 48) |\
		      patchlevel
	return (progname, progversion)
def parseVer_r(match, filepath):
	"""Build a sortable (name, version) pair from an rNNNN revision match."""
	revision = int(match.group(2))
	return (match.group(1), revision << 64)
def parseVer_ymd(match, filepath):
	"""Build a sortable (name, version) pair from a YYYY-MM-DD date match."""
	name = match.group(1)
	year, month, day = (int(match.group(g)) for g in (2, 3, 4))
	return (name, (year << 64) | (month << 48) | (day << 32))
def parseVer_GIT(match, filepath):
	"""Version a git-checksum tarball by the file's mtime.

	The file name carries no usable version, so the modification time of
	the archive on disk is used as the sort key instead.
	"""
	progname = match.group(1)
	mtime = int(os.stat(filepath).st_mtime)
	return (progname, mtime << 64)
# Archive extensions that are recognized (and stripped before version parsing).
extensions = (
	".tar.gz",
	".tar.bz2",
	".tar.xz",
	".orig.tar.gz",
	".orig.tar.bz2",
	".orig.tar.xz",
	".zip",
	".tgz",
	".tbz",
	".txz",
)

# (regex, parser) pairs tried in order; the first matching pattern wins, so
# the more specific version shapes must come before the generic ones.
versionRegex = (
	(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234),	# xxx-1.2.3.4
	(re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd),	# xxx-YYYY-MM-DD
	(re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT),		# xxx-GIT_SHASUM
	(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123),	# xxx-1.2.3a
	(re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123),		# xxx-1_2_3
	(re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12),		# xxx-1.2a
	(re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r),				# xxx-r1111
)

# Files matching any of these patterns are never deleted (see --whitelist
# to drop individual entries at runtime).
blacklist = [
	("linux",		re.compile(r"linux-.*")),
	("gcc",			re.compile(r"gcc-.*")),
	("wl_apsta",		re.compile(r"wl_apsta.*")),
	(".fw",			re.compile(r".*\.fw")),
	(".arm",		re.compile(r".*\.arm")),
	(".bin",		re.compile(r".*\.bin")),
	("rt-firmware",		re.compile(r"RT[\d\w]+_Firmware.*")),
]
# Raised internally by Entry.__init__ when a file name cannot be parsed.
class EntryParseError(Exception): pass
class Entry:
	"""One downloaded archive: file name parsed into program name, version
	sort key and extension.  Raises EntryParseError when either the
	extension or the version pattern is not recognized."""

	def __init__(self, directory, filename):
		self.directory = directory
		self.filename = filename
		self.progname = ""
		self.fileext = ""

		# Strip a known archive extension; for-else fires when none matched.
		for ext in extensions:
			if filename.endswith(ext):
				filename = filename[0:0-len(ext)]
				self.fileext = ext
				break
		else:
			print self.filename, "has an unknown file-extension"
			raise EntryParseError("ext")
		# Try the version patterns in order; the matched parser yields the
		# (progname, sortable integer version) pair.
		for (regex, parseVersion) in versionRegex:
			match = regex.match(filename)
			if match:
				(self.progname, self.version) = parseVersion(
					match, directory + "/" + filename + self.fileext)
				break
		else:
			print self.filename, "has an unknown version pattern"
			raise EntryParseError("ver")

	def deleteFile(self):
		"""Delete the archive from disk (honors the global dry-run flag)."""
		path = (self.directory + "/" + self.filename).replace("//", "/")
		print "Deleting", path
		if not opt_dryrun:
			os.unlink(path)

	def __eq__(self, y):
		# Identity is the file name; version comparison is separate (__ge__).
		return self.filename == y.filename

	def __ge__(self, y):
		# Ordering is by the parsed, bit-packed version integer.
		return self.version >= y.version
def usage():
	"""Print the command-line help text to stdout."""
	print "OpenWrt download directory cleanup utility"
	print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>"
	print ""
	print " -d|--dry-run            Do a dry-run. Don't delete any files"
	print " -B|--show-blacklist     Show the blacklist and exit"
	print " -w|--whitelist ITEM     Remove ITEM from blacklist"
def main(argv):
	"""Parse options, scan the download directory and delete all but the
	newest version of each program's tarball.  Returns a process exit code."""
	global opt_dryrun

	try:
		(opts, args) = getopt.getopt(argv[1:],
			"hdBw:",
			[ "help", "dry-run", "show-blacklist", "whitelist=", ])
		if len(args) != 1:
			usage()
			return 1
	except getopt.GetoptError:
		usage()
		return 1
	directory = args[0]
	for (o, v) in opts:
		if o in ("-h", "--help"):
			usage()
			return 0
		if o in ("-d", "--dry-run"):
			opt_dryrun = True
		if o in ("-w", "--whitelist"):
			# Drop the named entry from the blacklist; for-else reports
			# an unknown item.
			for i in range(0, len(blacklist)):
				(name, regex) = blacklist[i]
				if name == v:
					del blacklist[i]
					break
			else:
				print "Whitelist error: Item", v,\
				      "is not in blacklist"
				return 1
		if o in ("-B", "--show-blacklist"):
			for (name, regex) in blacklist:
				print name
			return 0

	# Create a directory listing and parse the file names.
	entries = []
	for filename in os.listdir(directory):
		if filename == "." or filename == "..":
			continue
		for (name, regex) in blacklist:
			if regex.match(filename):
				if opt_dryrun:
					print filename, "is blacklisted"
				break
		else:
			try:
				entries.append(Entry(directory, filename))
			# Unparsable names are simply skipped.
			except (EntryParseError), e: pass

	# Create a map of programs
	progmap = {}
	for entry in entries:
		if entry.progname in progmap.keys():
			progmap[entry.progname].append(entry)
		else:
			progmap[entry.progname] = [entry,]

	# Traverse the program map and delete everything but the last version
	for prog in progmap:
		lastVersion = None
		versions = progmap[prog]
		for version in versions:
			if lastVersion is None or version >= lastVersion:
				lastVersion = version
		if lastVersion:
			for version in versions:
				if version != lastVersion:
					version.deleteFile()
			if opt_dryrun:
				print "Keeping", lastVersion.filename

	return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
	sys.exit(main(sys.argv))
| gpl-2.0 |
logviewer/logviewer | src/logviewer/urls.py | 1 | 2350 | from django.conf.urls import patterns, include, url
from django.conf import settings
'''
Module that keeps url redirection settings.
@package logviewer.urls
@authors Deniz Eren
@authors Ibrahim Ercan
@authors Ersan Vural Zorlu
@authors Nijad Ahmadli
@copyright This project is released under BSD license
@date 2013/03/31
'''
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
#url(r'^$', 'logviewer.views.home', name='home'),
#url(r'^index/', include('logviewer.templates.index')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^search/(?P<log_type>\w{0,20})/', 'logviewer.views.search.search'),
url(r'^dsearch/(?P<log_type>\w{0,20})/(?P<log_date>[0-9T:-]{0,25})', 'logviewer.views.dsearch.dsearch'),
url(r'^dsearch/(?P<log_type>\w{0,20})', 'logviewer.views.dsearch.dsearch_columns'),
url(r'^$', 'logviewer.views.index.index'),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
url(r'^logout/$', 'logviewer.views.logout.logout_action'),
url(r'^managefilters/$', 'logviewer.views.managefilters.manage'),
url(r'^managefilters/deletefilter$', 'logviewer.views.managefilters.deletefilter'),
url(r'^managefilters/editfilter$', 'logviewer.views.managefilters.editfilter'),
url(r'^managefilters/savefilter$', 'logviewer.views.managefilters.savefilter'),
url(r'^managefilters/getfilters$', 'logviewer.views.managefilters.getfilters'),
url(r'^managefilters/getfilterinfo$', 'logviewer.views.managefilters.getfilterinfo'),
url(r'^settings/$', 'logviewer.views.settings.settings'),
url(r'^settings/save_server/$', 'logviewer.views.settings.save_server'),
url(r'^settings/get_log_settings/$', 'logviewer.views.settings.get_log_settings'),
url(r'^settings/get_log_types/$', 'logviewer.views.settings.get_log_types'),
url(r'^settings/save_log_settings/$', 'logviewer.views.settings.save_log_settings'),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.