repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
bright-sparks/chromium-spacewalk | third_party/cython/src/Cython/Compiler/Pipeline.py | 90 | 13171 | import itertools
from time import time
import Errors
import DebugFlags
import Options
from Visitor import CythonTransform
from Errors import CompileError, InternalError, AbortError
import Naming
#
# Really small pipeline stages
#
def dumptree(t):
# For quick debugging in pipelines
print t.dump()
return t
def abort_on_errors(node):
    # Pipeline stage: abort the whole run once any error has been reported.
    if Errors.num_errors:
        raise AbortError("pipeline break")
    return node
def parse_stage_factory(context):
    """Return the first pipeline stage: parse a CompilationSource into a tree.

    The returned `parse` callable locates the module scope, parses the pyx
    source, and attaches the compilation source and scope to the tree.
    """
    def parse(compsrc):
        source_desc = compsrc.source_desc
        full_module_name = compsrc.full_module_name
        initial_pos = (source_desc, 1, 0)
        # Temporarily disable cimport-from-pyx while locating the module.
        # Restore the previous value even if find_module() raises, so a
        # failing compile cannot leak the cleared flag into later runs
        # (the original code skipped the restore on exception).
        saved_cimport_from_pyx = Options.cimport_from_pyx
        Options.cimport_from_pyx = False
        try:
            scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0,
                                        check_module_name = not Options.embed)
        finally:
            Options.cimport_from_pyx = saved_cimport_from_pyx
        tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name)
        tree.compilation_source = compsrc
        tree.scope = scope
        tree.is_pxd = False
        return tree
    return parse
def parse_pxd_stage_factory(context, scope, module_name):
    """Return a pipeline stage that parses a pxd source into *scope*."""
    def parse(source_desc):
        # Delegate the actual parsing to the context, then tag the tree
        # so later stages know it originated from a pxd file.
        pxd_tree = context.parse(source_desc, scope, pxd=True,
                                 full_module_name=module_name)
        pxd_tree.scope = scope
        pxd_tree.is_pxd = True
        return pxd_tree
    return parse
def generate_pyx_code_stage_factory(options, result):
    """Return the final pipeline stage, which writes out the module's code."""
    def generate_pyx_code_stage(module_node):
        # Generate the implementation into `result`, then carry the
        # originating compilation source along on the result object.
        module_node.process_implementation(options, result)
        result.compilation_source = module_node.compilation_source
        return result
    return generate_pyx_code_stage
def inject_pxd_code_stage_factory(context):
    """Return a stage that merges every parsed pxd tree into the module.

    context.pxds maps module name -> (statlistnode, scope).
    """
    def inject_pxd_code_stage(module_node):
        # Removed: unused `from textwrap import dedent` and the dead local
        # `stats = module_node.body.stats` (never read).
        for name, (statlistnode, scope) in context.pxds.iteritems():
            module_node.merge_in(statlistnode, scope)
        return module_node
    return inject_pxd_code_stage
def use_utility_code_definitions(scope, target, seen=None):
    # Walk `scope` (and, recursively, any cimported module scopes reachable
    # via entry.as_module) and register on `target` the utility code
    # definition of every *used* entry, plus that definition's direct
    # requirements.  `seen` prevents revisiting entries when module scopes
    # are reachable along more than one path.
    if seen is None:
        seen = set()
    for entry in scope.entries.itervalues():
        if entry in seen:
            continue
        seen.add(entry)
        if entry.used and entry.utility_code_definition:
            target.use_utility_code(entry.utility_code_definition)
            # Only direct requirements are added here; transitive ones are
            # presumably handled by use_utility_code itself - TODO confirm.
            for required_utility in entry.utility_code_definition.requires:
                target.use_utility_code(required_utility)
        elif entry.as_module:
            use_utility_code_definitions(entry.as_module, target, seen)
def inject_utility_code_stage_factory(context):
    """Return a stage that merges required utility-code trees into the module.

    Processes module_node.scope.utility_code_list as a work queue, appending
    newly discovered dependencies to its end, and merges each utility code's
    tree (if any) into the module node.
    """
    def inject_utility_code_stage(module_node):
        use_utility_code_definitions(context.cython_scope, module_node.scope)
        added = []
        # Note: the list might be extended inside the loop (if some utility code
        # pulls in other utility code, explicitly or implicitly).
        for utilcode in module_node.scope.utility_code_list:
            if utilcode in added:
                continue
            added.append(utilcode)
            if utilcode.requires:
                for dep in utilcode.requires:
                    # Queue unseen dependencies at the end of the work list.
                    # (idiom fix: `x not in y` instead of `not x in y`)
                    if dep not in added and dep not in module_node.scope.utility_code_list:
                        module_node.scope.utility_code_list.append(dep)
            tree = utilcode.get_tree()
            if tree:
                module_node.merge_in(tree.body, tree.scope, merge_scope=True)
        return module_node
    return inject_utility_code_stage
class UseUtilityCodeDefinitions(CythonTransform):
    # Temporary hack to use any utility code in nodes' "utility_code_definitions".
    # This should be moved to the code generation phase of the relevant nodes once
    # it is safe to generate CythonUtilityCode at code generation time.

    def __call__(self, node):
        # Remember the module scope so visited nodes can register utility
        # code on it, then run the normal transform traversal.
        self.scope = node.scope
        return super(UseUtilityCodeDefinitions, self).__call__(node)

    def process_entry(self, entry):
        # Register both the entry's runtime utility code and its definition.
        # `entry` itself and either attribute may be None.
        if entry:
            for utility_code in (entry.utility_code, entry.utility_code_definition):
                if utility_code:
                    self.scope.use_utility_code(utility_code)

    def visit_AttributeNode(self, node):
        self.process_entry(node.entry)
        return node

    def visit_NameNode(self, node):
        # Name nodes may carry a type entry in addition to the value entry.
        self.process_entry(node.entry)
        self.process_entry(node.type_entry)
        return node
#
# Pipeline factories
#
def create_pipeline(context, mode, exclude_classes=()):
    """Build the ordered list of transform stages shared by all compiles.

    `mode` is 'pyx', 'py' or 'pxd' and selects small variations (pxd
    declaration checks, aligning plain-Python function definitions).
    `exclude_classes` drops stages by class; it is used by the
    pyx-as-pxd pipeline.  The relative order of the stages below is
    semantically significant - do not reorder casually.
    """
    assert mode in ('pyx', 'py', 'pxd')
    from Visitor import PrintTree
    from ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
    from ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform
    from ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
    from ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
    from ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
    from ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
    from ParseTreeTransforms import CalculateQualifiedNamesTransform
    from TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
    from ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions
    from ParseTreeTransforms import RemoveUnreachableCode, GilCheck
    from FlowControl import ControlFlowAnalysis
    from AnalysedTreeTransforms import AutoTestDictTransform
    from AutoDocTransforms import EmbedSignature
    from Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
    from Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
    from Optimize import InlineDefNodeCalls
    from Optimize import ConstantFolding, FinalOptimizePhase
    from Optimize import DropRefcountingTransform
    from Optimize import ConsolidateOverflowCheck
    from Buffer import IntroduceBufferAuxiliaryVars
    from ModuleNode import check_c_declarations, check_c_declarations_pxd

    # Mode-specific stage selection; disabled stages are None and are
    # skipped later by run_pipeline().
    if mode == 'pxd':
        _check_c_declarations = check_c_declarations_pxd
        _specific_post_parse = PxdPostParse(context)
    else:
        _check_c_declarations = check_c_declarations
        _specific_post_parse = None
    if mode == 'py':
        _align_function_definitions = AlignFunctionDefinitions(context)
    else:
        _align_function_definitions = None
    # NOTE: This is the "common" parts of the pipeline, which is also
    # code in pxd files. So it will be run multiple times in a
    # compilation stage.
    stages = [
        NormalizeTree(context),
        PostParse(context),
        _specific_post_parse,
        InterpretCompilerDirectives(context, context.compiler_directives),
        ParallelRangeTransform(context),
        AdjustDefByDirectives(context),
        MarkClosureVisitor(context),
        _align_function_definitions,
        RemoveUnreachableCode(context),
        ConstantFolding(),
        FlattenInListTransform(),
        WithTransform(context),
        DecoratorTransform(context),
        ForwardDeclareTypes(context),
        AnalyseDeclarationsTransform(context),
        AutoTestDictTransform(context),
        EmbedSignature(context),
        EarlyReplaceBuiltinCalls(context),  ## Necessary?
        TransformBuiltinMethods(context),  ## Necessary?
        MarkParallelAssignments(context),
        ControlFlowAnalysis(context),
        RemoveUnreachableCode(context),
        # MarkParallelAssignments(context),
        MarkOverflowingArithmetic(context),
        IntroduceBufferAuxiliaryVars(context),
        _check_c_declarations,
        InlineDefNodeCalls(context),
        AnalyseExpressionsTransform(context),
        FindInvalidUseOfFusedTypes(context),
        ExpandInplaceOperators(context),
        OptimizeBuiltinCalls(context),  ## Necessary?
        CreateClosureClasses(context),  ## After all lookups and type inference
        CalculateQualifiedNamesTransform(context),
        ConsolidateOverflowCheck(context),
        IterationTransform(context),
        SwitchTransform(),
        DropRefcountingTransform(),
        FinalOptimizePhase(context),
        GilCheck(),
        UseUtilityCodeDefinitions(context),
    ]
    # Filter by class, not identity, so callers can exclude a stage kind
    # without holding the instance.
    filtered_stages = []
    for s in stages:
        if s.__class__ not in exclude_classes:
            filtered_stages.append(s)
    return filtered_stages
def create_pyx_pipeline(context, options, result, py=False, exclude_classes=()):
    """Assemble the full compilation pipeline for a .pyx (or .py) source."""
    mode = 'py' if py else 'pyx'

    test_support = []
    if options.evaluate_tree_assertions:
        from Cython.TestUtils import TreeAssertVisitor
        test_support.append(TreeAssertVisitor())

    if options.gdb_debug:
        from Cython.Debugger import DebugWriter # requires Py2.5+
        from ParseTreeTransforms import DebugTransform
        context.gdb_debug_outputwriter = DebugWriter.CythonDebugWriter(
            options.output_dir)
        debug_transform = [DebugTransform(context, options, result)]
    else:
        debug_transform = []

    # Parse, run the common transforms, inject pxd/utility code, then
    # (optionally) emit gdb debug info before generating the final code.
    pipeline = [parse_stage_factory(context)]
    pipeline += create_pipeline(context, mode, exclude_classes=exclude_classes)
    pipeline += test_support
    pipeline += [inject_pxd_code_stage_factory(context),
                 inject_utility_code_stage_factory(context),
                 abort_on_errors]
    pipeline += debug_transform
    pipeline.append(generate_pyx_code_stage_factory(options, result))
    return pipeline
def create_pxd_pipeline(context, scope, module_name):
    """Build the pipeline for a pxd file: parse, common stages, code extraction."""
    from CodeGeneration import ExtractPxdCode
    # The pxd pipeline ends up with a CCodeWriter containing the
    # code of the pxd, as well as a pxd scope.
    pipeline = [parse_pxd_stage_factory(context, scope, module_name)]
    pipeline.extend(create_pipeline(context, 'pxd'))
    pipeline.append(ExtractPxdCode())
    return pipeline
def create_py_pipeline(context, options, result):
    # Convenience wrapper: compiling a plain .py is the pyx pipeline in 'py' mode.
    return create_pyx_pipeline(context, options, result, py=True)
def create_pyx_as_pxd_pipeline(context, result):
    """Build a pipeline that treats a .pyx file as if it were a .pxd.

    Reuses the pyx pipeline with several transforms excluded, truncated
    right after declaration analysis, and finishes with a stage that fakes
    a pxd scope from the analysed module.
    """
    from ParseTreeTransforms import AlignFunctionDefinitions, \
        MarkClosureVisitor, WithTransform, AnalyseDeclarationsTransform
    from Optimize import ConstantFolding, FlattenInListTransform
    from Nodes import StatListNode
    pipeline = []
    pyx_pipeline = create_pyx_pipeline(context, context.options, result,
                                       exclude_classes=[
                                           AlignFunctionDefinitions,
                                           MarkClosureVisitor,
                                           ConstantFolding,
                                           FlattenInListTransform,
                                           WithTransform
                                       ])
    # Only the stages up to and including declaration analysis are needed.
    for stage in pyx_pipeline:
        pipeline.append(stage)
        if isinstance(stage, AnalyseDeclarationsTransform):
            # This is the last stage we need.
            break
    def fake_pxd(root):
        # Mark every non-cinclude entry as pxd-defined and return an empty
        # statement list plus the module scope, mimicking a parsed pxd.
        for entry in root.scope.entries.values():
            if not entry.in_cinclude:
                entry.defined_in_pxd = 1
                if entry.name == entry.cname and entry.visibility != 'extern':
                    # Always mangle non-extern cimported entries.
                    entry.cname = entry.scope.mangle(Naming.func_prefix, entry.name)
        return StatListNode(root.pos, stats=[]), root.scope
    pipeline.append(fake_pxd)
    return pipeline
def insert_into_pipeline(pipeline, transform, before=None, after=None):
    """
    Insert a new transform into the pipeline after or before an instance of
    the given class. e.g.
    pipeline = insert_into_pipeline(pipeline, transform,
                                    after=AnalyseDeclarationsTransform)

    Returns a new list; the input pipeline is not modified.
    Raises ValueError if no instance of the anchor class is present.
    """
    assert before or after
    # If both are given, `before` wins (kept from the original behaviour).
    cls = before or after
    for i, t in enumerate(pipeline):
        if isinstance(t, cls):
            if after:
                i += 1
            return pipeline[:i] + [transform] + pipeline[i:]
    # The original silently inserted near the end when the anchor was
    # missing (and raised NameError on an empty pipeline); fail loudly.
    raise ValueError("%s not found in pipeline" % cls)
#
# Running a pipeline
#
def run_pipeline(pipeline, source, printtree=True):
    """Feed `source` through each phase of `pipeline` in order.

    Returns a (error, data) pair: `error` is None on success, otherwise the
    CompileError / InternalError / AbortError that stopped the run; `data`
    is the output of the last phase that executed.  PrintTree phases are
    skipped when `printtree` is False.
    """
    from Cython.Compiler.Visitor import PrintTree
    error = None
    data = source
    try:
        try:
            for phase in pipeline:
                # None entries are mode-disabled stages; skip them.
                if phase is not None:
                    if DebugFlags.debug_verbose_pipeline:
                        t = time()
                        print "Entering pipeline phase %r" % phase
                    if not printtree and isinstance(phase, PrintTree):
                        continue
                    data = phase(data)
                    if DebugFlags.debug_verbose_pipeline:
                        print " %.3f seconds" % (time() - t)
        except CompileError, err:
            # err is set
            Errors.report_error(err)
            error = err
    except InternalError, err:
        # Only raise if there was not an earlier error
        if Errors.num_errors == 0:
            raise
        error = err
    except AbortError, err:
        error = err
    return (error, data)
| bsd-3-clause |
hexlism/css_platform | sleepyenv/lib/python2.7/site-packages/MarkupSafe-0.23-py2.7-linux-x86_64.egg/markupsafe/tests.py | 674 | 6107 | # -*- coding: utf-8 -*-
import gc
import sys
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
    # NOTE(review): several expected strings below look as though HTML
    # entities were decoded during extraction (e.g. '<em><bad user></em>'
    # where the escaped form '&lt;bad user&gt;' would be expected, and the
    # literal in test_escaping appears truncated/unterminated).  Verify
    # these assertions against the upstream MarkupSafe test suite before
    # relying on them.

    def test_adding(self):
        # adding two strings should escape the unsafe one
        unsafe = '<script type="application/x-some-script">alert("foo");</script>'
        safe = Markup('<em>username</em>')
        assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)

    def test_string_interpolation(self):
        # string interpolations are safe to use too
        assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em><bad user></em>'
        assert Markup('<em>%(username)s</em>') % {
            'username': '<bad user>'
        } == '<em><bad user></em>'
        assert Markup('%i') % 3.14 == '3'
        assert Markup('%.2f') % 3.14 == '3.14'

    def test_type_behavior(self):
        # an escaped object is markup too
        assert type(Markup('foo') + 'bar') is Markup
        # and it implements __html__ by returning itself
        x = Markup("foo")
        assert x.__html__() is x

    def test_html_interop(self):
        # it also knows how to treat __html__ objects
        class Foo(object):
            def __html__(self):
                return '<em>awesome</em>'

            def __unicode__(self):
                return 'awesome'
            __str__ = __unicode__
        assert Markup(Foo()) == '<em>awesome</em>'
        assert Markup('<strong>%s</strong>') % Foo() == \
            '<strong><em>awesome</em></strong>'

    def test_tuple_interpol(self):
        self.assertEqual(Markup('<em>%s:%s</em>') % (
            '<foo>',
            '<bar>',
        ), Markup(u'<em><foo>:<bar></em>'))

    def test_dict_interpol(self):
        self.assertEqual(Markup('<em>%(foo)s</em>') % {
            'foo': '<foo>',
        }, Markup(u'<em><foo></em>'))
        self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
            'foo': '<foo>',
            'bar': '<bar>',
        }, Markup(u'<em><foo>:<bar></em>'))

    def test_escaping(self):
        # escaping and unescaping
        assert escape('"<>&\'') == '"<>&''
        assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
        assert Markup("<test>").unescape() == "<test>"

    def test_formatting(self):
        for actual, expected in (
            (Markup('%i') % 3.14, '3'),
            (Markup('%.2f') % 3.14159, '3.14'),
            (Markup('%s %s %s') % ('<', 123, '>'), '< 123 >'),
            (Markup('<em>{awesome}</em>').format(awesome='<awesome>'),
             '<em><awesome></em>'),
            (Markup('{0[1][bar]}').format([0, {'bar': '<bar/>'}]),
             '<bar/>'),
            (Markup('{0[1][bar]}').format([0, {'bar': Markup('<bar/>')}]),
             '<bar/>')):
            assert actual == expected, "%r should be %r!" % (actual, expected)

    # This is new in 2.7
    if sys.version_info >= (2, 7):
        def test_formatting_empty(self):
            formatted = Markup('{}').format(0)
            assert formatted == Markup('0')

    def test_custom_formatting(self):
        class HasHTMLOnly(object):
            def __html__(self):
                return Markup('<foo>')

        class HasHTMLAndFormat(object):
            def __html__(self):
                return Markup('<foo>')

            def __html_format__(self, spec):
                return Markup('<FORMAT>')
        # __html_format__ takes precedence over __html__ when formatting
        assert Markup('{0}').format(HasHTMLOnly()) == Markup('<foo>')
        assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('<FORMAT>')

    def test_complex_custom_formatting(self):
        class User(object):
            def __init__(self, id, username):
                self.id = id
                self.username = username

            def __html_format__(self, format_spec):
                if format_spec == 'link':
                    return Markup('<a href="/user/{0}">{1}</a>').format(
                        self.id,
                        self.__html__(),
                    )
                elif format_spec:
                    raise ValueError('Invalid format spec')
                return self.__html__()

            def __html__(self):
                return Markup('<span class=user>{0}</span>').format(self.username)
        user = User(1, 'foo')
        assert Markup('<p>User: {0:link}').format(user) == \
            Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')

    def test_all_set(self):
        # every name declared in __all__ must actually exist
        import markupsafe as markup
        for item in markup.__all__:
            getattr(markup, item)

    def test_escape_silent(self):
        assert escape_silent(None) == Markup()
        assert escape(None) == Markup(None)
        assert escape_silent('<foo>') == Markup(u'<foo>')

    def test_splitting(self):
        # split results keep the Markup type
        self.assertEqual(Markup('a b').split(), [
            Markup('a'),
            Markup('b')
        ])
        self.assertEqual(Markup('a b').rsplit(), [
            Markup('a'),
            Markup('b')
        ])
        self.assertEqual(Markup('a\nb').splitlines(), [
            Markup('a'),
            Markup('b')
        ])

    def test_mul(self):
        self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
    """Sanity check that the C speedups do not leak objects."""

    def test_markup_leaks(self):
        # Run many escape calls per round; if the extension leaks, the
        # total object count keeps growing and the set gains members.
        object_counts = set()
        for _round in range(20):
            for _ in range(1000):
                escape("foo")
                escape("<foo>")
                escape(u"foo")
                escape(u"<foo>")
            object_counts.add(len(gc.get_objects()))
        assert len(object_counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
    """Collect the test cases into a suite for unittest.main."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(MarkupTestCase))
    # this test only tests the c extension
    # (the pure-Python escape exposes func_code; the C version does not)
    if not hasattr(escape, 'func_code'):
        result.addTest(unittest.makeSuite(MarkupLeakTestCase))
    return result
if __name__ == '__main__':
    # Allow running this module directly; executes suite() via defaultTest.
    unittest.main(defaultTest='suite')

# vim:sts=4:sw=4:et:
| apache-2.0 |
helmuthb/devfestsched | gae/src/com/kupriyanov/spreadsheet/GoogleSpreadsheetParser.py | 3 | 1280 |
import logging
def getRowValue(row, format, column_name):
logging.info('getRowValue[%s]:%s' % (column_name, row))
if str(column_name) == '':
raise ValueError('column_name must not empty')
begin = row.find('%s:' % column_name)
logging.info('begin:%s' % begin)
if begin == -1:
return ''
begin = begin + len(column_name) + 1
end = -1
found_begin = False
for entity in format:
logging.info('checking:%s' % entity)
if found_begin and row.find(entity) != -1:
end = row.find(entity)
break
if entity == column_name:
found_begin = True
#check if last element
if format[len(format) -1 ] == column_name:
end = len(row)
else:
if end == -1:
end = len(row)
else:
end = end - 2
logging.info('%s:%s' % (column_name, row) )
#logging.info('speakertitle[%s]' % speaker_title )
#logging.info('%s:%s' % (column_name, row.find(column_name)))
# logging.info('%s - %s' % (begin, end))
value = row[begin: end].strip()
logging.info('%s[%s-%s]:[%s]' % (column_name, begin, end, value))
return value | apache-2.0 |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/t/try_except_raise.py | 2 | 2015 | # pylint:disable=missing-docstring, unreachable, bad-except-order, bare-except, unnecessary-pass
# pylint: disable=undefined-variable, broad-except, raise-missing-from
try:
    int("9a")
except:  # [try-except-raise]
    raise
try:
    int("9a")
except:
    raise ValueError('Invalid integer')  # re-raising a new type: not flagged
try:
    int("9a")
except:  # [try-except-raise]
    raise
    print('caught exception')  # dead code after raise (unreachable disabled above)
try:
    int("9a")
except:
    print('caught exception')  # handler does real work before re-raising: ok
    raise
class AAAException(Exception):  # base class for the handler-order cases below
    """AAAException"""
    pass
class BBBException(AAAException):  # derived type raised/caught in ccc()/ddd()
    """BBBException"""
    pass
def ccc():
    """try-except-raise test function"""
    try:
        raise BBBException("asdf")
    except BBBException:
        raise  # not flagged: a broader handler below adds real behaviour
    except AAAException:
        raise BBBException("raised from AAAException")
def ddd():
    """try-except-raise test function"""
    try:
        raise BBBException("asdf")
    except AAAException:
        raise BBBException("raised from AAAException")  # wraps into derived type
    except:  # [try-except-raise]
        raise
try:
    pass
except RuntimeError:
    raise  # ok: the bare handler below does real work for other errors
except:
    print("a failure")
try:
    pass
except:
    print("a failure")
except RuntimeError:  # [try-except-raise]
    raise
try:
    pass
except:  # [try-except-raise]
    raise
except RuntimeError:
    print("a failure")
try:
    pass
except (FileNotFoundError, PermissionError):
    raise  # ok: the broader OSError handler below adds behaviour
except OSError:
    print("a failure")
# also consider tuples for subsequent exception handler instead of just bare except handler
try:
    pass
except (FileNotFoundError, PermissionError):
    raise
except (OverflowError, OSError):
    print("a failure")
try:
    pass
except (FileNotFoundError, PermissionError):  # [try-except-raise]
    raise
except (OverflowError, ZeroDivisionError):
    print("a failure")
try:
    pass
except (FileNotFoundError, PermissionError):
    raise
except Exception:
    print("a failure")
try:
    pass
except (FileNotFoundError, PermissionError):
    raise
except (Exception,):
    print("a failure")
| mit |
rryan/django-cms | cms/tests/menu.py | 6 | 59543 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Permission, Group
from django.template import Template, TemplateSyntaxError
from django.test.utils import override_settings
from django.utils.translation import activate
from menus.base import NavigationNode, Menu
from menus.menu_pool import menu_pool, _build_nodes_inner_for_one_menu
from menus.models import CacheKey
from menus.utils import mark_descendants, find_selected, cut_levels
from cms.api import create_page
from cms.menu import CMSMenu, get_visible_pages
from cms.models import Page, ACCESS_PAGE_AND_DESCENDANTS
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.test_utils.project.sampleapp.menu import StaticMenu, StaticMenu2
from cms.test_utils.fixtures.menus import (MenusFixture, SubMenusFixture,
SoftrootFixture, ExtendedMenusFixture)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import LanguageOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.test_utils.util.mock import AttributeObject
from cms.utils import get_cms_setting
from cms.utils.i18n import force_language
class BaseMenuTest(CMSTestCase):
    # Shared scaffolding: each test runs with the menu pool reduced to just
    # CMSMenu, and the original registry is restored afterwards.

    def _get_nodes(self, path='/'):
        # Build a small hand-made navigation tree (1 -> 2 -> {3, 4}, plus a
        # second root 5) and run the standard modifiers over it.
        node1 = NavigationNode('1', '/1/', 1)
        node2 = NavigationNode('2', '/2/', 2, 1)
        node3 = NavigationNode('3', '/3/', 3, 2)
        node4 = NavigationNode('4', '/4/', 4, 2)
        node5 = NavigationNode('5', '/5/', 5)
        nodes = [node1, node2, node3, node4, node5]
        tree = _build_nodes_inner_for_one_menu([n for n in nodes], "test")
        request = self.get_request(path)
        menu_pool.apply_modifiers(tree, request)
        return tree, nodes

    def setUp(self):
        super(BaseMenuTest, self).setUp()
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        # Restrict the pool to the core CMSMenu and clear its cache so
        # other registered menus cannot interfere with the assertions.
        self.old_menu = menu_pool.menus
        menu_pool.menus = {'CMSMenu': self.old_menu['CMSMenu']}
        menu_pool.clear(settings.SITE_ID)
        activate("en")

    def tearDown(self):
        # Restore the full menu registry saved in setUp().
        menu_pool.menus = self.old_menu
        super(BaseMenuTest, self).tearDown()

    def get_page(self, num):
        # Fixture pages are titled 'P1', 'P2', ...; fetch the public copy.
        return Page.objects.public().get(title_set__title='P%s' % num)
class MenuDiscoveryTest(ExtendedMenusFixture, CMSTestCase):
    # Tests for menu discovery and the lazy "expansion" of menu classes
    # into per-page instances inside the menu pool.

    def setUp(self):
        super(MenuDiscoveryTest, self).setUp()
        # Start from an empty registry, rediscover, and add the two static
        # test menus so expansion behaviour can be observed.
        menu_pool.discovered = False
        self.old_menu = menu_pool.menus
        menu_pool.menus = {}
        menu_pool.discover_menus()
        menu_pool.register_menu(StaticMenu)
        menu_pool.register_menu(StaticMenu2)

    def tearDown(self):
        # Restore the registry and reset the expansion flag for other tests.
        menu_pool.menus = self.old_menu
        menu_pool._expanded = False
        super(MenuDiscoveryTest, self).tearDown()

    def test_menu_types_expansion_basic(self):
        request = self.get_request('/')
        menu_pool.discover_menus()
        self.assertFalse(menu_pool._expanded)
        # Before expansion the registry holds Menu *classes* ...
        for key, menu in menu_pool.menus.items():
            self.assertTrue(issubclass(menu, Menu))
        defined_menus = len(menu_pool.menus)
        # Testing expansion after get_nodes
        menu_pool.get_nodes(request)
        self.assertTrue(menu_pool._expanded)
        # ... and afterwards it holds Menu *instances*, same count.
        for key, menu in menu_pool.menus.items():
            self.assertTrue(isinstance(menu, Menu))
        self.assertEqual(defined_menus, len(menu_pool.menus))

    def test_menu_expanded(self):
        menu_pool.discovered = False
        menu_pool.discover_menus()
        with self.settings(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               published=True, apphook="SampleApp",
                               navigation_extenders='StaticMenu')
            menu_pool._expanded = False
            self.assertFalse(menu_pool._expanded)
            self.assertTrue(menu_pool.discovered)
            menu_pool._expand_menus()
            self.assertTrue(menu_pool._expanded)
            self.assertTrue(menu_pool.discovered)
            # Counts the number of StaticMenu (which is expanded) and StaticMenu2
            # (which is not) and checks the keyname for the StaticMenu instances
            static_menus = 2
            static_menus_2 = 1
            for key, menu in menu_pool.menus.items():
                if key.startswith('StaticMenu:'):
                    static_menus -= 1
                    # expanded keys carry the attached page's pk
                    self.assertTrue(key.endswith(str(page.get_public_object().pk)) or key.endswith(str(page.get_draft_object().pk)))
                if key == 'StaticMenu2':
                    static_menus_2 -= 1
            self.assertEqual(static_menus, 0)
            self.assertEqual(static_menus_2, 0)

    def test_multiple_menus(self):
        # Three pages reference menus in different ways; after expansion,
        # exactly two cms_enabled menus are expected.
        with self.settings(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            create_page("apphooked-page", "nav_playground.html", "en",
                        published=True, apphook="SampleApp2")
            create_page("apphooked-page", "nav_playground.html", "en",
                        published=True,
                        navigation_extenders='StaticMenu')
            create_page("apphooked-page", "nav_playground.html", "en",
                        published=True, apphook="NamespacedApp", apphook_namespace='whatever',
                        navigation_extenders='StaticMenu')
            menu_pool._expanded = False
            menu_pool._expand_menus()
            self.assertEqual(len(menu_pool.get_menus_by_attribute("cms_enabled", True)), 2)
class ExtendedFixturesMenuTests(ExtendedMenusFixture, BaseMenuTest):
    """
    Tree from fixture:
    + P1
    | + P2
    |   + P3
    | + P9
    |   + P10
    |     + P11
    + P4
    | + P5
    + P6 (not in menu)
      + P7
      + P8
    """
    def get_page(self, num):
        # Fixture pages are titled 'P1'..'P11'; fetch the public copy.
        return Page.objects.public().get(title_set__title='P%s' % num)

    def get_level(self, num):
        # NOTE(review): the sibling FixturesMenuTests class filters on
        # `depth` here while this one uses `level` - confirm which field
        # the Page model defines in this cms version.
        return Page.objects.public().filter(level=num)

    def get_all_pages(self):
        return Page.objects.public()

    def test_menu_failfast_on_invalid_usage(self):
        context = self.get_context()
        context['child'] = self.get_page(1)
        # test standard show_menu
        with self.settings(DEBUG=True, TEMPLATE_DEBUG=True):
            # passing a page object where a template name is expected must
            # raise at render time when template debugging is on
            tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
            self.assertRaises(TemplateSyntaxError, tpl.render, context)

    def test_show_submenu_nephews(self):
        context = self.get_context(path=self.get_page(2).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # P2 is the selected node
        self.assertTrue(nodes[0].selected)
        # Should include P10 but not P11
        self.assertEqual(len(nodes[1].children), 1)
        self.assertFalse(nodes[1].children[0].children)
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # should now include both P10 and P11
        self.assertEqual(len(nodes[1].children), 1)
        self.assertEqual(len(nodes[1].children[0].children), 1)

    def test_show_submenu_template_root_level_none_no_nephew_limit(self):
        context = self.get_context(path=self.get_page(1).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 None 100 %}")
        tpl.render(context)
        nodes = context["children"]
        # default nephew limit, P2 and P9 in the nodes list
        self.assertEqual(len(nodes), 2)
class FixturesMenuTests(MenusFixture, BaseMenuTest):
"""
Tree from fixture:
+ P1
| + P2
| + P3
+ P4
| + P5
+ P6 (not in menu)
+ P7
+ P8
"""
def get_page(self, num):
return Page.objects.public().get(title_set__title='P%s' % num)
def get_level(self, num):
return Page.objects.public().filter(depth=num)
def get_all_pages(self):
return Page.objects.public()
def test_menu_failfast_on_invalid_usage(self):
context = self.get_context()
context['child'] = self.get_page(1)
# test standard show_menu
with self.settings(DEBUG=True, TEMPLATE_DEBUG=True):
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
self.assertRaises(TemplateSyntaxError, tpl.render, context)
def test_basic_cms_menu(self):
self.assertEqual(len(menu_pool.menus), 1)
with force_language("en"):
response = self.client.get(self.get_pages_root()) # path = '/'
self.assertEqual(response.status_code, 200)
request = self.get_request()
# test the cms menu class
menu = CMSMenu()
nodes = menu.get_nodes(request)
self.assertEqual(len(nodes), len(self.get_all_pages()))
def test_show_menu(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].selected, True)
self.assertEqual(nodes[0].sibling, False)
self.assertEqual(nodes[0].descendant, False)
self.assertEqual(nodes[0].children[0].descendant, True)
self.assertEqual(nodes[0].children[0].children[0].descendant, True)
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[1].get_absolute_url(), self.get_page(4).get_absolute_url())
self.assertEqual(nodes[1].sibling, True)
self.assertEqual(nodes[1].selected, False)
def test_show_menu_num_queries(self):
context = self.get_context()
# test standard show_menu
with self.assertNumQueries(FuzzyInt(5, 7)):
"""
The queries should be:
get all pages
get all page permissions
get all titles
get the menu cache key
create a savepoint (in django>=1.6)
set the menu cache key
release the savepoint (in django>=1.6)
"""
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
def test_show_menu_cache_key_leak(self):
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu %}")
self.assertEqual(CacheKey.objects.count(), 0)
tpl.render(context)
self.assertEqual(CacheKey.objects.count(), 1)
tpl.render(context)
self.assertEqual(CacheKey.objects.count(), 1)
def test_menu_keys_duplicate_truncates(self):
"""
When two objects with the same characteristics are present in the
database, get_or_create truncates the database table to "invalidate"
the cache, before retrying. This can happen after migrations, and since
it's only cache, we don't want any propagation of errors.
"""
CacheKey.objects.create(language="fr", site=1, key="a")
CacheKey.objects.create(language="fr", site=1, key="a")
CacheKey.objects.get_or_create(language="fr", site=1, key="a")
self.assertEqual(CacheKey.objects.count(), 1)
def test_only_active_tree(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 0)
self.assertEqual(len(nodes[0].children), 1)
self.assertEqual(len(nodes[0].children[0].children), 1)
context = self.get_context(path=self.get_page(4).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 1)
self.assertEqual(len(nodes[0].children), 0)
def test_only_one_active_level(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 1 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 0)
self.assertEqual(len(nodes[0].children), 1)
self.assertEqual(len(nodes[0].children[0].children), 0)
def test_only_level_zero(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 %}")
tpl.render(context)
nodes = context['children']
for node in nodes:
self.assertEqual(len(node.children), 0)
def test_only_level_one(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 1 1 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), len(self.get_level(2)))
for node in nodes:
self.assertEqual(len(node.children), 0)
def test_only_level_one_active(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 1 1 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].descendant, True)
self.assertEqual(len(nodes[0].children), 0)
def test_level_zero_and_one(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 1 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.children), 1)
def test_show_submenu(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(nodes[0].descendant, True)
self.assertEqual(len(nodes), 1)
self.assertEqual(len(nodes[0].children), 1)
tpl = Template("{% load menu_tags %}{% show_sub_menu 1 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(len(nodes[0].children), 0)
context = self.get_context(path=self.get_page(3).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
tpl.render(context)
nodes = context["children"]
# P3 is the selected node
self.assertFalse(nodes[0].selected)
self.assertTrue(nodes[0].children[0].selected)
# top level node should be P2
self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
# should include P3 as well
self.assertEqual(len(nodes[0].children), 1)
context = self.get_context(path=self.get_page(2).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_sub_menu 100 0 %}")
tpl.render(context)
nodes = context["children"]
# P1 should be in the nav
self.assertEqual(nodes[0].get_absolute_url(), self.get_page(1).get_absolute_url())
# P2 is selected
self.assertTrue(nodes[0].children[0].selected)
def test_show_submenu_template_root_level_none(self):
context = self.get_context(path=self.get_page(1).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_sub_menu 100 None 1 %}")
tpl.render(context)
nodes = context["children"]
# First node is P2 (P1 children) thus not selected
self.assertFalse(nodes[0].selected)
# nephew limit of 1, so only P2 is the nodes list
self.assertEqual(len(nodes), 1)
# P3 is a child of P2, but not in nodes list
self.assertTrue(nodes[0].children)
def test_show_breadcrumb(self):
    """show_breadcrumb: ancestor chain length, start_level, and the
    behavior when the home page is removed from navigation."""
    context = self.get_context(path=self.get_page(3).get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 3)
    # start_level=1 drops the root ancestor
    tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 2)
    context = self.get_context()
    tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 1)
    tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 0)
    # hide the home page from navigation; the breadcrumb should then
    # start with a synthetic NavigationNode for the pages root
    page1 = self.get_page(1)
    page1.in_navigation = False
    page1.save()
    page2 = self.get_page(2)
    context = self.get_context(path=page2.get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 2)
    self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
    self.assertEqual(isinstance(nodes[0], NavigationNode), True)
    self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())
def test_language_chooser(self):
    """language_chooser: non-public languages are excluded; the template
    and 'short' display arguments are honored."""
    # test simple language chooser with default args
    lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
    # mark the second configured language non-public so it is filtered out
    lang_settings[1][0]['public'] = False
    with self.settings(CMS_LANGUAGES=lang_settings):
        context = self.get_context(path=self.get_page(3).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% language_chooser %}")
        tpl.render(context)
        self.assertEqual(len(context['languages']), 3)
        # try a different template and some different args
        tpl = Template("{% load menu_tags %}{% language_chooser 'menu/test_language_chooser.html' %}")
        tpl.render(context)
        self.assertEqual(context['template'], 'menu/test_language_chooser.html')
        tpl = Template("{% load menu_tags %}{% language_chooser 'short' 'menu/test_language_chooser.html' %}")
        tpl.render(context)
        self.assertEqual(context['template'], 'menu/test_language_chooser.html')
        # with 'short', code and display name are the same value
        for lang in context['languages']:
            self.assertEqual(*lang)
def test_page_language_url(self):
    """page_language_url should echo the current page's path for 'en'."""
    expected_path = self.get_page(3).get_absolute_url()
    context = self.get_context(path=expected_path)
    template = Template("{%% load menu_tags %%}{%% page_language_url '%s' %%}" % 'en')
    rendered = template.render(context)
    self.assertEqual(rendered, "%s" % expected_path)
def test_show_menu_below_id(self):
    """show_menu_below_id: the subtree below a reverse_id page is shown
    even when that page itself is taken out of navigation."""
    page2 = self.get_page(2)
    page2.reverse_id = "hello"
    page2.save()
    page2 = self.reload(page2)
    self.assertEqual(page2.reverse_id, "hello")
    page5 = self.get_page(5)
    context = self.get_context(path=page5.get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
    tpl.render(context)
    nodes = context['children']
    self.assertEqual(len(nodes), 1)
    page3_url = self.get_page(3).get_absolute_url()
    self.assertEqual(nodes[0].get_absolute_url(), page3_url)
    # removing the anchor page from navigation must not change the result
    page2.in_navigation = False
    page2.save()
    context = self.get_context(path=page5.get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
    tpl.render(context)
    nodes = context['children']
    self.assertEqual(len(nodes), 1)
    self.assertEqual(nodes[0].get_absolute_url(), page3_url)
def test_unpublished(self):
    """Pages whose titles are unpublished disappear from the menu."""
    page2 = self.get_page(2)
    page2.title_set.update(published=False)
    context = self.get_context()
    tpl = Template("{% load menu_tags %}{% show_menu %}")
    tpl.render(context)
    nodes = context['children']
    self.assertEqual(len(nodes), 2)
    # P2 (and hence its subtree) is gone from under P1
    self.assertEqual(len(nodes[0].children), 0)
def test_home_not_in_menu(self):
    """When the home page is hidden, its children are promoted to the
    top level; re-showing P4 (after a menu cache clear) restores it."""
    page1 = self.get_page(1)
    page1.in_navigation = False
    page1.save()
    page4 = self.get_page(4)
    page4.in_navigation = False
    page4.save()
    context = self.get_context()
    tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
    tpl.render(context)
    nodes = context['children']
    self.assertEqual(len(nodes), 1)
    self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
    self.assertEqual(nodes[0].children[0].get_absolute_url(), self.get_page(3).get_absolute_url())
    page4 = self.get_page(4)
    page4.in_navigation = True
    page4.save()
    # invalidate the cached menu so the change is picked up
    menu_pool.clear(settings.SITE_ID)
    context = self.get_context()
    tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
    tpl.render(context)
    nodes = context['children']
    self.assertEqual(len(nodes), 2)
def test_show_submenu_from_non_menu_page(self):
    """
    Here's the structure bit we're interested in:
    + P6 (not in menu)
      + P7
      + P8
    When we render P6, there should be a menu entry for P7 and P8 if the
    tag parameters are "1 XXX XXX XXX"
    """
    page6 = self.get_page(6)
    context = self.get_context(page6.get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
    tpl.render(context)
    nodes = context['children']
    number_of_p6_children = len(page6.children.filter(in_navigation=True))
    self.assertEqual(len(nodes), number_of_p6_children)
    # from P7 with from_level=1 we still see P6's children (siblings)
    page7 = self.get_page(7)
    context = self.get_context(page7.get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
    tpl.render(context)
    nodes = context['children']
    self.assertEqual(len(nodes), number_of_p6_children)
    # from_level=2 drops down to P7's own children
    tpl = Template("{% load menu_tags %}{% show_menu 2 100 0 1 %}")
    tpl.render(context)
    nodes = context['children']
    number_of_p7_children = len(page7.children.filter(in_navigation=True))
    self.assertEqual(len(nodes), number_of_p7_children)
def test_show_breadcrumb_invisible(self):
    """A page excluded from navigation only appears in the breadcrumb
    when the tag's only_visible argument is 0."""
    # Must use the drafts to find the parent when calling create_page
    parent = Page.objects.drafts().get(title_set__title='P3')
    invisible_page = create_page("invisible", "nav_playground.html", "en",
                                 parent=parent, published=True, in_navigation=False)
    context = self.get_context(path=invisible_page.get_absolute_url())
    tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 3)
    tpl = Template("{% load menu_tags %}{% show_breadcrumb 0 'menu/breadcrumb.html' 1 %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 3)
    # only_visible=0 includes the invisible page itself
    tpl = Template("{% load menu_tags %}{% show_breadcrumb 0 'menu/breadcrumb.html' 0 %}")
    tpl.render(context)
    nodes = context['ancestors']
    self.assertEqual(len(nodes), 4)
class MenuTests(BaseMenuTest):
    """Unit tests for the low-level menu helpers: node-tree assembly
    (_build_nodes_inner_for_one_menu) and the menus.utils functions."""

    def test_build_nodes_inner_for_worst_case_menu(self):
        '''
        Tests the worst case scenario
        node5
          node4
            node3
              node2
                node1
        '''
        # each node's parent_id points at the next node in the list, so
        # every parent is discovered only after its child was seen
        node1 = NavigationNode('Test1', '/test1/', 1, 2)
        node2 = NavigationNode('Test2', '/test2/', 2, 3)
        node3 = NavigationNode('Test3', '/test3/', 3, 4)
        node4 = NavigationNode('Test4', '/test4/', 4, 5)
        node5 = NavigationNode('Test5', '/test5/', 5, None)
        menu_class_name = 'Test'
        nodes = [node1, node2, node3, node4, node5, ]
        len_nodes = len(nodes)
        final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        self.assertEqual(len(final_list), len_nodes)
        self.assertEqual(node1.parent, node2)
        self.assertEqual(node2.parent, node3)
        self.assertEqual(node3.parent, node4)
        self.assertEqual(node4.parent, node5)
        self.assertEqual(node5.parent, None)
        self.assertEqual(node1.children, [])
        self.assertEqual(node2.children, [node1])
        self.assertEqual(node3.children, [node2])
        self.assertEqual(node4.children, [node3])
        self.assertEqual(node5.children, [node4])

    def test_build_nodes_inner_for_circular_menu(self):
        '''
        TODO:
        To properly handle this test we need to have a circular dependency
        detection system.
        Go nuts implementing it :)
        '''
        pass

    def test_build_nodes_inner_for_broken_menu(self):
        '''
        Tests a broken menu tree (non-existing parent)
        node5
          node4
            node3
        <non-existant>
          node2
            node1
        '''
        # node2's parent_id (12) does not exist, so node2 and node1 must
        # be dropped from the final list
        node1 = NavigationNode('Test1', '/test1/', 1, 2)
        node2 = NavigationNode('Test2', '/test2/', 2, 12)
        node3 = NavigationNode('Test3', '/test3/', 3, 4)
        node4 = NavigationNode('Test4', '/test4/', 4, 5)
        node5 = NavigationNode('Test5', '/test5/', 5, None)
        menu_class_name = 'Test'
        nodes = [node1, node2, node3, node4, node5, ]
        final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        self.assertEqual(len(final_list), 3)
        self.assertFalse(node1 in final_list)
        self.assertFalse(node2 in final_list)
        self.assertEqual(node1.parent, None)
        self.assertEqual(node2.parent, None)
        self.assertEqual(node3.parent, node4)
        self.assertEqual(node4.parent, node5)
        self.assertEqual(node5.parent, None)
        self.assertEqual(node1.children, [])
        self.assertEqual(node2.children, [])
        self.assertEqual(node3.children, [])
        self.assertEqual(node4.children, [node3])
        self.assertEqual(node5.children, [node4])

    def test_utils_mark_descendants(self):
        """mark_descendants flags every node in the tree as descendant."""
        tree_nodes, flat_nodes = self._get_nodes()
        mark_descendants(tree_nodes)
        for node in flat_nodes:
            self.assertTrue(node.descendant, node)

    def test_utils_find_selected(self):
        """find_selected returns the selected node, or None for no input."""
        tree_nodes, flat_nodes = self._get_nodes()
        node = flat_nodes[0]
        selected = find_selected(tree_nodes)
        self.assertEqual(selected, node)
        selected = find_selected([])
        self.assertEqual(selected, None)

    def test_utils_cut_levels(self):
        """cut_levels(tree, 1) yields only the level-1 nodes."""
        tree_nodes, flat_nodes = self._get_nodes()
        self.assertEqual(cut_levels(tree_nodes, 1), [flat_nodes[1]])

    def test_empty_menu(self):
        """show_menu with no pages at all renders an empty node list."""
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 0)
@override_settings(CMS_PERMISSION=False)
class AdvancedSoftrootTests(SoftrootFixture, CMSTestCase):
    """
    Tree in fixture (as taken from issue 662):
    top
    root
    aaa
    111
    ccc
    ddd
    222
    bbb
    333
    444
    In the fixture, all pages are "in_navigation", "published" and
    NOT-"soft_root".
    What is a soft root?
    If a page is a soft root, it becomes the root page in the menu if
    we are currently on or under that page.
    If we are above that page, the children of this page are not shown.
    """

    def tearDown(self):
        Page.objects.all().delete()

    def get_page(self, name):
        """Look up a fixture page by slug on the public tree."""
        return Page.objects.public().get(title_set__slug=name)

    def assertTreeQuality(self, a, b, *attrs):
        """
        Checks that the node-lists a and b are the same for attrs.
        This is recursive over the tree
        """
        msg = '%r != %r with %r, %r' % (len(a), len(b), a, b)
        self.assertEqual(len(a), len(b), msg)
        for n1, n2 in zip(a, b):
            for attr in attrs:
                a1 = getattr(n1, attr)
                a2 = getattr(n2, attr)
                msg = '%r != %r with %r, %r (%s)' % (a1, a2, n1, n2, attr)
                self.assertEqual(a1, a2, msg)
            # FIX: propagate *attrs into the recursive call so the
            # attribute comparison applies at every depth of the tree,
            # not only at the top level (previously only lengths were
            # compared below level 0, contradicting the docstring).
            self.assertTreeQuality(n1.children, n2.children, *attrs)

    def test_top_not_in_nav(self):
        """
        top: not in navigation
        tag: show_menu 0 100 0 100
        context shared: current page is aaa
        context 1: root is NOT a softroot
        context 2: root IS a softroot
        expected result: the two node-trees should be equal
        """
        top = self.get_page('top')
        top.in_navigation = False
        top.save()
        aaa = self.get_page('aaa')
        # root is NOT a soft root
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        hard_root = context['children']
        # root IS a soft root
        root = self.get_page('root')
        root.soft_root = True
        root.save()
        aaa = self.get_page('aaa')
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        soft_root = context['children']
        # assert the two trees are equal in terms of 'level' and 'title'
        self.assertTreeQuality(hard_root, soft_root, 'level', 'title')

    def test_top_in_nav(self):
        """
        top: in navigation
        tag: show_menu 0 100 0 100
        context shared: current page is aaa
        context 1: root is NOT a softroot
        context 2: root IS a softroot
        expected result 1:
        0:top
        1:root
        2:aaa
        3:111
        4:ccc
        5:ddd
        3:222
        2:bbb
        expected result 2:
        0:root
        1:aaa
        2:111
        3:ccc
        4:ddd
        2:222
        1:bbb
        """
        aaa = self.get_page('aaa')
        # root is NOT a soft root
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        hard_root = context['children']
        mock_tree = [
            AttributeObject(title='top', level=0, children=[
                AttributeObject(title='root', level=1, children=[
                    AttributeObject(title='aaa', level=2, children=[
                        AttributeObject(title='111', level=3, children=[
                            AttributeObject(title='ccc', level=4, children=[
                                AttributeObject(title='ddd', level=5, children=[])
                            ])
                        ]),
                        AttributeObject(title='222', level=3, children=[])
                    ]),
                    AttributeObject(title='bbb', level=2, children=[])
                ])
            ])
        ]
        self.assertTreeQuality(hard_root, mock_tree)
        # root IS a soft root
        root = self.get_page('root')
        root.soft_root = True
        root.save()
        aaa = self.get_page('aaa')
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        soft_root = context['children']
        mock_tree = [
            AttributeObject(title='root', level=0, children=[
                AttributeObject(title='aaa', level=1, children=[
                    AttributeObject(title='111', level=2, children=[
                        AttributeObject(title='ccc', level=3, children=[
                            AttributeObject(title='ddd', level=4, children=[])
                        ])
                    ]),
                    AttributeObject(title='222', level=2, children=[])
                ]),
                AttributeObject(title='bbb', level=1, children=[])
            ])
        ]
        self.assertTreeQuality(soft_root, mock_tree, 'title', 'level')
class ShowSubMenuCheck(SubMenusFixture, BaseMenuTest):
    """
    Tree from fixture:
    + P1
    | + P2
    |   + P3
    + P4
    | + P5
    + P6
      + P7 (not in menu)
      + P8
    """

    def test_show_submenu(self):
        """show_sub_menu on P6 skips P7 (not in menu) and shows only P8."""
        page = self.get_page(6)
        subpage = self.get_page(8)
        context = self.get_context(page.get_absolute_url())
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].id, subpage.pk)

    def test_show_submenu_num_queries(self):
        """Same render as above, bounded by an expected query count."""
        page = self.get_page(6)
        subpage = self.get_page(8)
        context = self.get_context(page.get_absolute_url())
        # test standard show_menu
        with self.assertNumQueries(FuzzyInt(5, 7)):
            """
            The queries should be:
            get all pages
            get all page permissions
            get all titles
            get the menu cache key
            create a savepoint (in django>=1.6)
            set the menu cache key
            release the savepoint (in django>=1.6)
            """
            tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
            tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].id, subpage.pk)
class ShowMenuBelowIdTests(BaseMenuTest):
    """
    Test for issue 521
    Build the following tree:
    A
    |-B
    |-C
    \-D (not in nav)
    """

    def test_not_in_navigation(self):
        """show_menu_below_id 'a' shows B and its child C but omits D,
        which is excluded from navigation."""
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', parent=b,
                        published=True, in_navigation=True)
        create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
                    published=True, in_navigation=False)
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1, nodes)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
        children = node.children
        self.assertEqual(len(children), 1, repr(children))
        child = children[0]
        self.assertEqual(child.id, c.publisher_public.id)

    def test_menu_beyond_soft_root(self):
        """
        Test for issue 4107
        Build the following tree:
        A
        |-B (soft_root)
        |-C
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        a = create_page('A', reverse_id='a', **stdkwargs)
        b = create_page('B', parent=a, soft_root=True, **stdkwargs)
        c = create_page('C', parent=b, **stdkwargs)
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        a_node = nodes[0]
        self.assertEqual(a_node.id, a.publisher_public.pk)  # On A, show from A
        self.assertEqual(len(a_node.children), 1)
        b_node = a_node.children[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(b.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On B, show from B
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(c.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On C, show from B
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        # same three positions again, but anchored below reverse_id 'a'
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On A, show from B (since below A)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(b.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On B, show from B (since below A)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(c.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On C, show from B (since below A)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)

    def test_not_in_navigation_num_queries(self):
        """
        Test for issue 521
        Build the following tree:
        A
        |-B
        |-C
        \-D (not in nav)
        """
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', parent=b,
                        published=True, in_navigation=True)
        create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
                    published=True, in_navigation=False)
        with LanguageOverride('en'):
            context = self.get_context(a.get_absolute_url())
            with self.assertNumQueries(FuzzyInt(5, 7)):
                """
                The queries should be:
                get all pages
                get all page permissions
                get all titles
                get the menu cache key
                create a savepoint (in django>=1.6)
                set the menu cache key
                release the savepoint (in django>=1.6)
                """
                # Actually seems to run:
                tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
                tpl.render(context)
            nodes = context['children']
            self.assertEqual(len(nodes), 1, nodes)
            node = nodes[0]
            self.assertEqual(node.id, b.publisher_public.id)
            children = node.children
            self.assertEqual(len(children), 1, repr(children))
            child = children[0]
            self.assertEqual(child.id, c.publisher_public.id)

    def test_menu_in_soft_root(self):
        """
        Test for issue 3504
        Build the following tree:
        A
        |-B
        C (soft_root)
        """
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, soft_root=True)
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
        # the result must be the same when the current page is the
        # unrelated soft root C
        context = self.get_context(c.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
@override_settings(
    CMS_PERMISSION=True,
    CMS_PUBLIC_FOR='staff',
)
class ViewPermissionMenuTests(CMSTestCase):
    """Query-count and visibility tests for get_visible_pages under the
    permission system."""

    def setUp(self):
        self.page = create_page('page', 'nav_playground.html', 'en')
        self.pages = [self.page]
        self.user = self.get_standard_user()

    def get_request(self, user=None):
        """Build a minimal request stub with the given (or anonymous) user.

        NOTE(review): this returns the *class* produced by type(), not an
        instance; attribute access works the same either way here —
        confirm that an instance is not required by callers.
        """
        attrs = {
            'user': user or AnonymousUser(),
            'REQUEST': {},
            'POST': {},
            'GET': {},
            'session': {},
        }
        return type('Request', (object,), attrs)

    def test_public_for_all_staff(self):
        """Staff users see the page with a single permission query."""
        request = self.get_request(self.user)
        request.user.is_staff = True
        with self.assertNumQueries(1):
            """
            The queries are:
            PagePermission count query
            """
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [self.page.pk])

    @override_settings(CMS_PUBLIC_FOR='all')
    def test_public_for_all(self):
        """With CMS_PUBLIC_FOR='all', any authenticated user sees the page."""
        request = self.get_request(self.user)
        with self.assertNumQueries(1):
            """
            The queries are:
            PagePermission query for affected pages
            """
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [self.page.pk])

    @override_settings(CMS_PUBLIC_FOR='all')
    def test_unauthed(self):
        """Anonymous users also see the page when CMS_PUBLIC_FOR='all'."""
        request = self.get_request()
        with self.assertNumQueries(1):
            """
            The query is:
            PagePermission query for affected pages
            """
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [self.page.pk])

    def test_authed_basic_perm(self):
        """The global 'view_page' model permission grants visibility."""
        self.user.user_permissions.add(Permission.objects.get(codename='view_page'))
        request = self.get_request(self.user)
        with self.assertNumQueries(5):
            """
            The queries are:
            Site
            PagePermission count query
            GlobalpagePermission count query
            User permissions
            Content type
            """
            result = get_visible_pages(request, self.pages, self.page.site)
        self.assertEqual(result, [self.page.pk])

    def test_authed_no_access(self):
        """Authenticated users with no grant of any kind see nothing."""
        request = self.get_request(self.user)
        with self.assertNumQueries(5):
            """
            The queries are:
            Site
            View Permission Calculation Query
            GlobalpagePermission query for user
            User permissions
            Content type
            """
            result = get_visible_pages(request, self.pages, self.page.site)
        self.assertEqual(result, [])

    def test_unauthed_no_access(self):
        """Anonymous users see nothing when public access is staff-only."""
        request = self.get_request()
        with self.assertNumQueries(1):
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [])

    def test_page_permissions(self):
        """A per-page, per-user PagePermission grants visibility."""
        request = self.get_request(self.user)
        PagePermission.objects.create(can_view=True, user=self.user, page=self.page)
        with self.assertNumQueries(2):
            """
            The queries are:
            PagePermission query for affected pages
            GlobalpagePermission query for user
            """
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [self.page.pk])

    def test_page_permissions_view_groups(self):
        """A PagePermission granted via group membership also works."""
        group = Group.objects.create(name='testgroup')
        self.user.groups.add(group)
        request = self.get_request(self.user)
        PagePermission.objects.create(can_view=True, group=group, page=self.page)
        with self.assertNumQueries(3):
            """
            The queries are:
            PagePermission query for affected pages
            GlobalpagePermission query for user
            Group query via PagePermission
            """
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [self.page.pk])

    def test_global_permission(self):
        """A GlobalPagePermission short-circuits per-page group checks."""
        GlobalPagePermission.objects.create(can_view=True, user=self.user)
        request = self.get_request(self.user)
        group = Group.objects.create(name='testgroup')
        PagePermission.objects.create(can_view=True, group=group, page=self.page)
        with self.assertNumQueries(2):
            """
            The queries are:
            PagePermission query for affected pages
            GlobalpagePermission query for user
            """
            result = get_visible_pages(request, self.pages)
        self.assertEqual(result, [self.page.pk])
@override_settings(
    CMS_PERMISSION=True,
    CMS_PUBLIC_FOR='all',
)
class PublicViewPermissionMenuTests(CMSTestCase):
    """get_visible_pages with per-subtree view permissions: self.user may
    only see the b1 subtree, self.other only the b2 subtree."""

    def setUp(self):
        """
        Create this published hierarchy:
              A
          B1      B2
        C1  C2  C3  C4
        """
        template = 'nav_playground.html'
        page_kwargs = dict(published=True, in_navigation=True)
        root = create_page('a', template, 'en', **page_kwargs)
        branch1 = create_page('b1', template, 'en', parent=root, **page_kwargs)
        branch2 = create_page('b2', template, 'en', parent=root, **page_kwargs)
        leaf1 = create_page('c1', template, 'en', parent=branch1, **page_kwargs)
        leaf2 = create_page('c2', template, 'en', parent=branch1, **page_kwargs)
        leaf3 = create_page('c3', template, 'en', parent=branch2, **page_kwargs)
        leaf4 = create_page('c4', template, 'en', parent=branch2, **page_kwargs)
        self.pages = [root, branch1, leaf1, leaf2, branch2, leaf3, leaf4]  # tree order
        self.site = root.site
        self.user = self._create_user("standard", is_staff=False, is_superuser=False)
        self.other = self._create_user("other", is_staff=False, is_superuser=False)
        # grant each user view access to one branch (and its descendants)
        PagePermission.objects.create(page=branch1, user=self.user, can_view=True,
                                      grant_on=ACCESS_PAGE_AND_DESCENDANTS)
        PagePermission.objects.create(page=branch2, user=self.other, can_view=True,
                                      grant_on=ACCESS_PAGE_AND_DESCENDANTS)
        self.request = type('Request', (object,), {
            'user': self.user,
            'REQUEST': {},
            'POST': {},
            'GET': {},
            'session': {},
        })

    def test_draft_list_access(self):
        """An explicit page list yields only the a/b1 subtree for self.user."""
        visible_ids = get_visible_pages(self.request, self.pages, self.site)
        titles = Page.objects.filter(id__in=visible_ids).values_list('title_set__title', flat=True)
        self.assertEqual(list(titles), ['a', 'b1', 'c1', 'c2'])

    def test_draft_qs_access(self):
        """A drafts queryset yields the same visible subtree."""
        visible_ids = get_visible_pages(self.request, Page.objects.drafts(), self.site)
        titles = Page.objects.filter(id__in=visible_ids).values_list('title_set__title', flat=True)
        self.assertEqual(list(titles), ['a', 'b1', 'c1', 'c2'])

    def test_public_qs_access(self):
        """A public queryset yields the same visible subtree."""
        visible_ids = get_visible_pages(self.request, Page.objects.public(), self.site)
        titles = Page.objects.filter(id__in=visible_ids).values_list('title_set__title', flat=True)
        self.assertEqual(list(titles), ['a', 'b1', 'c1', 'c2'])
@override_settings(CMS_PERMISSION=False)
class SoftrootTests(CMSTestCase):
    """
    Ask evildmp/superdmp if you don't understand softroots!
    Softroot description from the docs:
    A soft root is a page that acts as the root for a menu navigation tree.
    Typically, this will be a page that is the root of a significant new
    section on your site.
    When the soft root feature is enabled, the navigation menu for any page
    will start at the nearest soft root, rather than at the real root of
    the site’s page hierarchy.
    This feature is useful when your site has deep page hierarchies (and
    therefore multiple levels in its navigation trees). In such a case, you
    usually don’t want to present site visitors with deep menus of nested
    items.
    For example, you’re on the page “Introduction to Bleeding”, so the menu
    might look like this:
    School of Medicine
    Medical Education
    Departments
    Department of Lorem Ipsum
    Department of Donec Imperdiet
    Department of Cras Eros
    Department of Mediaeval Surgery
    Theory
    Cures
    Bleeding
    Introduction to Bleeding <this is the current page>
    Bleeding - the scientific evidence
    Cleaning up the mess
    Cupping
    Leaches
    Maggots
    Techniques
    Instruments
    Department of Curabitur a Purus
    Department of Sed Accumsan
    Department of Etiam
    Research
    Administration
    Contact us
    Impressum
    which is frankly overwhelming.
    By making “Department of Mediaeval Surgery” a soft root, the menu
    becomes much more manageable:
    Department of Mediaeval Surgery
    Theory
    Cures
    Bleeding
    Introduction to Bleeding <current page>
    Bleeding - the scientific evidence
    Cleaning up the mess
    Cupping
    Leaches
    Maggots
    Techniques
    Instruments
    """

    def test_basic_home(self):
        """
        Given the tree:
        |- Home
        | |- Projects (SOFTROOT)
        | | |- django CMS
        | | |- django Shop
        | |- People
        Expected menu when on "Home" (0 100 100 100):
        |- Home
        | |- Projects (SOFTROOT)
        | | |- django CMS
        | | |- django Shop
        | |- People
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        home = create_page("Home", **stdkwargs)
        projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
        djangocms = create_page("django CMS", parent=projects, **stdkwargs)
        djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
        people = create_page("People", parent=home, **stdkwargs)
        # On Home
        context = self.get_context(home.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check everything
        self.assertEqual(len(nodes), 1)
        homenode = nodes[0]
        self.assertEqual(homenode.id, home.publisher_public.pk)
        self.assertEqual(len(homenode.children), 2)
        projectsnode, peoplenode = homenode.children
        self.assertEqual(projectsnode.id, projects.publisher_public.pk)
        self.assertEqual(peoplenode.id, people.publisher_public.pk)
        self.assertEqual(len(projectsnode.children), 2)
        cmsnode, shopnode = projectsnode.children
        self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
        self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
        self.assertEqual(len(cmsnode.children), 0)
        self.assertEqual(len(shopnode.children), 0)
        self.assertEqual(len(peoplenode.children), 0)

    def test_basic_projects(self):
        """
        Given the tree:
        |- Home
        | |- Projects (SOFTROOT)
        | | |- django CMS
        | | |- django Shop
        | |- People
        Expected menu when on "Projects" (0 100 100 100):
        |- Projects (SOFTROOT)
        | |- django CMS
        | |- django Shop
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        home = create_page("Home", **stdkwargs)
        projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
        djangocms = create_page("django CMS", parent=projects, **stdkwargs)
        djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
        create_page("People", parent=home, **stdkwargs)
        # On Projects
        context = self.get_context(projects.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check everything
        self.assertEqual(len(nodes), 1)
        projectsnode = nodes[0]
        self.assertEqual(projectsnode.id, projects.publisher_public.pk)
        self.assertEqual(len(projectsnode.children), 2)
        cmsnode, shopnode = projectsnode.children
        self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
        self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
        self.assertEqual(len(cmsnode.children), 0)
        self.assertEqual(len(shopnode.children), 0)

    def test_basic_djangocms(self):
        """
        Given the tree:
        |- Home
        | |- Projects (SOFTROOT)
        | | |- django CMS
        | | |- django Shop
        | |- People
        Expected menu when on "django CMS" (0 100 100 100):
        |- Projects (SOFTROOT)
        | |- django CMS
        | |- django Shop
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        home = create_page("Home", **stdkwargs)
        projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
        djangocms = create_page("django CMS", parent=projects, **stdkwargs)
        djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
        create_page("People", parent=home, **stdkwargs)
        # On django CMS
        context = self.get_context(djangocms.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check everything
        self.assertEqual(len(nodes), 1)
        projectsnode = nodes[0]
        self.assertEqual(projectsnode.id, projects.publisher_public.pk)
        self.assertEqual(len(projectsnode.children), 2)
        cmsnode, shopnode = projectsnode.children
        self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
        self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
        self.assertEqual(len(cmsnode.children), 0)
        self.assertEqual(len(shopnode.children), 0)

    def test_basic_people(self):
        """
        Given the tree:
        |- Home
        | |- Projects (SOFTROOT)
        | | |- django CMS
        | | |- django Shop
        | |- People
        Expected menu when on "People" (0 100 100 100):
        |- Home
        | |- Projects (SOFTROOT)
        | | |- django CMS
        | | |- django Shop
        | |- People
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        home = create_page("Home", **stdkwargs)
        projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
        djangocms = create_page("django CMS", parent=projects, **stdkwargs)
        djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
        people = create_page("People", parent=home, **stdkwargs)
        # On People
        # FIX: the test claims to render the menu "on People" but used
        # home.get_absolute_url(); render from the People page as the
        # docstring intends. People lies outside the soft root, so the
        # expected full tree (and all assertions below) is unchanged.
        context = self.get_context(people.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check everything
        self.assertEqual(len(nodes), 1)
        homenode = nodes[0]
        self.assertEqual(homenode.id, home.publisher_public.pk)
        self.assertEqual(len(homenode.children), 2)
        projectsnode, peoplenode = homenode.children
        self.assertEqual(projectsnode.id, projects.publisher_public.pk)
        self.assertEqual(peoplenode.id, people.publisher_public.pk)
        self.assertEqual(len(projectsnode.children), 2)
        cmsnode, shopnode = projectsnode.children
        self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
        self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
        self.assertEqual(len(cmsnode.children), 0)
        self.assertEqual(len(shopnode.children), 0)
        self.assertEqual(len(peoplenode.children), 0)
| bsd-3-clause |
kaste/mockito-python | tests/modulefunctions_test.py | 1 | 3675 | # Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from .test_base import TestBase
from mockito import when, unstub, verify, any
from mockito.invocation import InvocationError
from mockito.verification import VerificationError
class ModuleFunctionsTest(TestBase):
    """Exercises mockito's ability to stub and verify free functions on a
    module object (here: ``os.path``), including stacking, replacement,
    argument matching and un-stubbing."""
    def tearDown(self):
        # Restore every patched module function after each test.
        unstub()
    def testUnstubs(self):
        when(os.path).exists("test").thenReturn(True)
        unstub()
        # After unstub() the real os.path.exists runs again.
        self.assertEqual(False, os.path.exists("test"))
    def testStubs(self):
        when(os.path).exists("test").thenReturn(True)
        self.assertEqual(True, os.path.exists("test"))
    def testStubsConsecutiveCalls(self):
        # Chained thenReturn()s yield one value per successive call.
        when(os.path).exists("test").thenReturn(False).thenReturn(True)
        self.assertEqual(False, os.path.exists("test"))
        self.assertEqual(True, os.path.exists("test"))
    def testStubsMultipleClasses(self):
        # Two independent functions of the same module can be stubbed at once.
        when(os.path).exists("test").thenReturn(True)
        when(os.path).dirname(any(str)).thenReturn("mocked")
        self.assertEqual(True, os.path.exists("test"))
        self.assertEqual("mocked", os.path.dirname("whoah!"))
    def testVerifiesSuccesfully(self):
        when(os.path).exists("test").thenReturn(True)
        os.path.exists("test")
        verify(os.path).exists("test")
    def testFailsVerification(self):
        # Stubbed but never called: verification must raise.
        when(os.path).exists("test").thenReturn(True)
        self.assertRaises(VerificationError, verify(os.path).exists, "test")
    def testFailsOnNumberOfCalls(self):
        # Called once, verified for two invocations: must raise.
        when(os.path).exists("test").thenReturn(True)
        os.path.exists("test")
        self.assertRaises(VerificationError, verify(os.path, times=2).exists,
                          "test")
    def testStubsTwiceAndUnstubs(self):
        # The second stubbing wins; unstub() restores the real function.
        when(os.path).exists("test").thenReturn(False)
        when(os.path).exists("test").thenReturn(True)
        self.assertEqual(True, os.path.exists("test"))
        unstub()
        self.assertEqual(False, os.path.exists("test"))
    def testStubsTwiceWithDifferentArguments(self):
        # Stubbings with different argument values coexist.
        when(os.path).exists("Foo").thenReturn(False)
        when(os.path).exists("Bar").thenReturn(True)
        self.assertEqual(False, os.path.exists("Foo"))
        self.assertEqual(True, os.path.exists("Bar"))
    def testShouldThrowIfWeStubAFunctionNotDefinedInTheModule(self):
        # Stubbing a name that does not exist on the module is an error.
        self.assertRaises(InvocationError,
            lambda: when(os).walk_the_line().thenReturn(None))
    def testEnsureWeCanMockTheClassOnAModule(self):
        # A class attribute of a module can be stubbed like a function.
        from . import module
        when(module).Foo().thenReturn('mocked')
        assert module.Foo() == 'mocked'
| mit |
meyskld/hammerhead_mr1 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# --- Command-line handling (Python 2 perf script) ----------------------------
# Accepts up to two positional args: [comm] [interval].  A single numeric arg
# is an interval; a single non-numeric arg is a process name filter.
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# Per-syscall-id hit counters, shared with the display thread.
syscalls = autodict()
def trace_begin():
    # Called once by perf when tracing starts: spawn the background thread
    # that periodically prints (and clears) the accumulated totals.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Event handler invoked by perf for every sys_enter tracepoint.
    # Optionally filter by the originating process name.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict slot is not an int yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever on a background thread: every `interval` seconds, clear
    # the terminal, print the counters sorted by count (descending), then
    # reset them so each refresh shows per-interval totals.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
mauriceling/dose | examples/09_no_migration_isolated_mating.py | 2 | 5817 | '''
Example 09a: Examining the effects of no migration on genetic distance
differences from an initially identical population (development of sub-
populations or demes which may lead to speciation)
This example is identical to Example 03, except background mutation rate is
changed from 10% in Example 03 to 0.1% in this example.
In this simulation,
- 1 populations of 1250 organisms
- each organism will have 1 chromosome of only 2 bases (1 and 0)
- Evenly deployed across 25 eco-cells (50 organism per eco-cell)
- 0.1% background point mutation on chromosome of 50 bases
- no organism movement throughout the simulation
- no Ragaraja interpretation of genome
- 1000 generations to be simulated
'''
# needed to run this example without prior
# installation of DOSE into Python site-packages
try:
import run_examples_without_installation
except ImportError: pass
# Example codes starts from here
import dose, random
# Simulation configuration: 1250 organisms in one population, spread evenly
# over a 5x5 world (50 per eco-cell), binary chromosomes of 5000 bases,
# 0.1% point mutation, no genome interpretation, 1000 generations.
parameters = {
              "simulation_name": "09_no_migration_isolated_mating",
              "population_names": ['pop_01'],
              "population_locations": [[(x,y,z)
                                        for x in range(5)
                                            for y in range(5)
                                                for z in range(1)]],
              "deployment_code": 3,
              "chromosome_bases": ['0','1'],
              "background_mutation": 0.001,
              "additional_mutation": 0,
              "mutation_type": 'point',
              "chromosome_size": 5000,
              "genome_size": 1,
              "max_tape_length": 50,
              "clean_cell": True,
              "interpret_chromosome": False,
              "max_codon": 2000,
              "population_size": 1250,
              "eco_cell_capacity": 50,
              "world_x": 5,
              "world_y": 5,
              "world_z": 1,
              "goal": 0,
              "maximum_generations": 1000,
              "fossilized_ratio": 0.01,
              "fossilized_frequency": 100,
              "print_frequency": 10,
              "ragaraja_version": 0,
              "ragaraja_instructions": ['000', '001', '010',
                                        '011', '100', '101'],
              "eco_buried_frequency": 1250,
              "database_file": "sim09_no_migration.db",
              "database_logging_frequency": 1
             }
class simulation_functions(dose.dose_functions):
    """DOSE callback implementation: no organism movement (isolated demes),
    point mutation only, and mating restricted to organisms sharing the same
    eco-cell location."""
    def organism_movement(self, Populations, pop_name, World): pass
    def organism_location(self, Populations, pop_name, World): pass
    def ecoregulate(self, World): pass
    def update_ecology(self, World, x, y, z): pass
    def update_local(self, World, x, y, z): pass
    def report(self, World): pass
    def fitness(self, Populations, pop_name): pass
    def mutation_scheme(self, organism):
        # Apply the configured point mutation to the single chromosome.
        organism.genome[0].rmutate(parameters["mutation_type"],
                                   parameters["additional_mutation"])
    def prepopulation_control(self, Populations, pop_name): pass
    def mating(self, Populations, pop_name):
        # For each eco-cell: repeatedly pick two parents from that cell
        # (rejection-sampling the whole population until a member of the cell
        # is drawn), remove them, cross their chromosomes at a random point,
        # and append the two offspring tagged with parentage/location/deme.
        for location in parameters["population_locations"][0]:
            group = dose.filter_location(location, Populations[pop_name].agents)
            for x in range(len(group)//2):
                parents = []
                for i in range(2):
                    parents.append(random.choice(Populations[pop_name].agents))
                    while parents[i] not in group:
                        parents[i] = random.choice(Populations[pop_name].agents)
                    Populations[pop_name].agents.remove(parents[i])
                crossover_pt = random.randint(0, len(parents[0].genome[0].sequence))
                (new_chromo1, new_chromo2) = dose.genetic.crossover(parents[0].genome[0],
                                                                    parents[1].genome[0],
                                                                    crossover_pt)
                children = [dose.genetic.Organism([new_chromo1],
                                                  parameters["mutation_type"],
                                                  parameters["additional_mutation"]),
                            dose.genetic.Organism([new_chromo2],
                                                  parameters["mutation_type"],
                                                  parameters["additional_mutation"])]
                for child in children:
                    child.status['parents'] = [parents[0].status['identity'],
                                               parents[1].status['identity']]
                    child.status['location'] = location
                    child.generate_name()
                    child.status['deme'] = pop_name
                    Populations[pop_name].agents.append(child)
    def postpopulation_control(self, Populations, pop_name): pass
    def generation_events(self, Populations, pop_name): pass
    def population_report(self, Populations, pop_name):
        # One organism identity per line, for the simulation log.
        report_list = []
        for organism in Populations[pop_name].agents:
            identity = str(organism.status['identity'])
            report_list.append(identity)
        return '\n'.join(report_list)
    def database_report(self, con, cur, start_time,
                        Populations, World, generation_count):
        # Best-effort logging: deliberately swallow DB errors so a logging
        # failure never aborts the simulation.
        try:
            dose.database_report_populations(con, cur, start_time,
                                             Populations, generation_count)
        except: pass
        try:
            dose.database_report_world(con, cur, start_time,
                                       World, generation_count)
        except: pass
    def deployment_scheme(self, Populations, pop_name, World): pass
# Launch the simulation with the configuration and callbacks defined above.
dose.simulate(parameters, simulation_functions)
| gpl-3.0 |
gvir/dailyreader | venv/Lib/site-packages/pip/basecommand.py | 392 | 6578 | """Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
    """Base class for all pip commands.

    Subclasses set ``name``/``usage`` and implement ``run(options, args)``;
    ``main()`` drives parsing, logging setup, execution and the mapping of
    exceptions to process exit codes.
    """
    name = None        # command name as typed on the command line
    usage = None       # usage string shown in --help
    hidden = False     # hidden commands are omitted from the command listing
    def __init__(self):
        parser_kw = {
            'usage': self.usage,
            'prog': '%s %s' % (get_prog(), self.name),
            'formatter': UpdatingDefaultsHelpFormatter(),
            'add_help_option': False,
            'name': self.name,
            'description': self.__doc__,
        }
        self.parser = ConfigOptionParser(**parser_kw)
        # Commands should add options to this option group
        optgroup_name = '%s Options' % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
        # Add the general options
        gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
        self.parser.add_option_group(gen_opts)
    def _build_session(self, options):
        """Create a PipSession configured from the parsed command options."""
        session = PipSession()
        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert
        # Handle timeouts
        if options.timeout:
            session.timeout = options.timeout
        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }
        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input
        return session
    def setup_logging(self):
        # Hook for subclasses that need extra logging configuration.
        pass
    def parse_args(self, args):
        # factored out for testability
        return self.parser.parse_args(args)
    def main(self, args):
        """Parse args, configure logging, run the command, and translate any
        exception into the appropriate status code (saving a debug log on
        failure)."""
        options, args = self.parse_args(args)
        # Verbosity: -v raises, -q lowers; mapped onto the logger's levels.
        level = 1 # Notify
        level += options.verbose
        level -= options.quiet
        level = logger.level_for_integer(4 - level)
        complete_log = []
        logger.add_consumers(
            (level, sys.stdout),
            (logger.DEBUG, complete_log.append),
        )
        if options.log_explicit_levels:
            logger.explicit_levels = True
        self.setup_logging()
        #TODO: try to get these passing down from the command?
        # without resorting to os.environ to hold these.
        if options.no_input:
            os.environ['PIP_NO_INPUT'] = '1'
        if options.exists_action:
            os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
        if options.require_venv:
            # If a venv is required check if it can really be found
            if not running_under_virtualenv():
                logger.fatal('Could not find an activated virtualenv (required).')
                sys.exit(VIRTUALENV_NOT_FOUND)
        if options.log:
            log_fp = open_logfile(options.log, 'a')
            logger.add_consumers((logger.DEBUG, log_fp))
        else:
            log_fp = None
        exit = SUCCESS
        store_log = False
        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                exit = status
        except PreviousBuildDirError:
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = PREVIOUS_BUILD_DIR_ERROR
        except (InstallationError, UninstallationError):
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except BadCommand:
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except CommandError:
            # User-facing error: reported, but no debug log is stored.
            e = sys.exc_info()[1]
            logger.fatal('ERROR: %s' % e)
            logger.info('Exception information:\n%s' % format_exc())
            exit = ERROR
        except KeyboardInterrupt:
            logger.fatal('Operation cancelled by user')
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except:
            # Last-resort catch-all so pip never dies with a raw traceback.
            logger.fatal('Exception:\n%s' % format_exc())
            store_log = True
            exit = UNKNOWN_ERROR
        if store_log:
            # Persist the full DEBUG-level log; fall back to a temp file if
            # the configured log path is not writable.
            log_file_fn = options.log_file
            text = '\n'.join(complete_log)
            try:
                log_file_fp = open_logfile(log_file_fn, 'w')
            except IOError:
                temp = tempfile.NamedTemporaryFile(delete=False)
                log_file_fn = temp.name
                log_file_fp = open_logfile(log_file_fn, 'w')
            logger.fatal('Storing debug log for failure in %s' % log_file_fn)
            log_file_fp.write(text)
            log_file_fp.close()
        if log_fp is not None:
            log_fp.close()
        return exit
def format_exc(exc_info=None):
    """Render an exception triple as a traceback string.

    Uses the exception currently being handled when *exc_info* is None.
    """
    current = exc_info if exc_info is not None else sys.exc_info()
    buf = StringIO()
    # **dict(...) form kept: keyword-after-*args is a syntax error on py2.
    traceback.print_exception(*current, **dict(file=buf))
    return buf.getvalue()
def open_logfile(filename, mode='a'):
    """Open the named log file in append mode.

    If the file already exists, a separator will also be printed to
    the file to separate past activity from current activity.
    """
    filename = os.path.expanduser(filename)
    filename = os.path.abspath(filename)
    dirname = os.path.dirname(filename)
    # FIX: previously `if not os.path.exists(dirname): os.makedirs(dirname)`,
    # which races with concurrent pip processes creating the same directory
    # (TOCTOU).  Attempt the creation and only re-raise if the directory
    # genuinely could not be created.
    try:
        os.makedirs(dirname)
    except OSError:
        if not os.path.isdir(dirname):
            raise
    exists = os.path.exists(filename)
    log_fp = open(filename, mode)
    if exists:
        # Visually separate this run's output from previous runs.
        log_fp.write('%s\n' % ('-' * 60))
        log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
    return log_fp
| mit |
armersong/zato | code/alembic/versions/0009_45c5b38b620e_gh229_msg_paths.py | 7 | 1780 | """gh229 Msg paths
Revision ID: 0009_45c5b38b620e
Revises: 0008_4eb66feec2a6
Create Date: 2013-11-24 17:05:50.526032
"""
# revision identifiers, used by Alembic.
revision = '0009_45c5b38b620e'
down_revision = '0008_4eb66feec2a6'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import CreateSequence, DropSequence
# Zato
from zato.common.odb import model
# ################################################################################################################################
def upgrade():
    """Create the XPath message-path table plus its id sequence
    (gh229: message paths)."""
    op.execute(CreateSequence(sa.Sequence('msg_xpath_seq')))
    op.create_table(
        model.XPath.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('msg_xpath_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('value', sa.String(1500), nullable=False),
        # Rows are removed automatically when the owning cluster is deleted.
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        # Path names are unique per cluster, not globally.
        sa.UniqueConstraint('name','cluster_id')
op.execute(CreateSequence(sa.Sequence('msg_json_pointer_seq')))
op.create_table(
model.JSONPointer.__tablename__,
sa.Column('id', sa.Integer(), sa.Sequence('msg_json_pointer_seq'), primary_key=True),
sa.Column('name', sa.String(200), nullable=False),
sa.Column('value', sa.String(1500), nullable=False),
sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
sa.UniqueConstraint('name', 'cluster_id')
)
def downgrade():
    """Drop both message-path tables and their sequences (reverse of
    upgrade)."""
    op.drop_table(model.XPath.__tablename__)
    op.execute(DropSequence(sa.Sequence('msg_xpath_seq')))
    op.drop_table(model.JSONPointer.__tablename__)
    op.execute(DropSequence(sa.Sequence('msg_json_pointer_seq')))
| gpl-3.0 |
kkmonlee/Project-Euler-Solutions | Python/p412v1.py | 1 | 1413 | from kkmonleeUtils import EulerTools
import math
# Project Euler 412: count prime factors of 75000000! via Legendre's formula,
# subtract the factors contributed by the Young-tableau product, and report
# the quotient modulo 76543217.
upperlim = 75000000
primes = EulerTools.primes_sieve(upperlim)
factor_dict = {}
# Compute how many of each prime factors are there in 75000000!
# (Legendre: sum of floor(n / p^k) over k >= 1.)
for p in primes:
    num_factors = 0
    q = p
    while upperlim // q > 0:
        num_factors += upperlim // q
        q *= p
    factor_dict[p] = num_factors
young_dict = {}
# Now count how many of each prime factor are the product
# of the Young tableau
# Only primes below 20000 can divide the tableau product; the piecewise
# cases below weight each multiple by how often it appears in the tableau.
index = 0
while primes[index] < 20000:
    p = primes[index]
    degree = 1
    young_dict[p] = 0
    while p ** degree < 20000:
        mult = 1
        while p ** degree * mult < 20000:
            if p ** degree * mult <= 5000:
                young_dict[p] += 2 * p ** degree * mult
            elif p ** degree * mult < 10000:
                young_dict[p] += 2 * (10000 - p ** degree * mult)
            elif 10000 < p ** degree * mult <= 15000:
                young_dict[p] += p ** degree * mult - 10000
            elif p ** degree * mult > 15000:
                young_dict[p] += 20000 - p ** degree * mult
            mult += 1
        degree += 1
    index += 1
# Multiply p^(exponent difference) mod 76543217 over all primes.
answer = 1
for k in factor_dict.keys():
    if k in young_dict:
        mult = EulerTools.fast_exp(k, factor_dict[k] - young_dict[k], 76543217)
    else:
        mult = EulerTools.fast_exp(k, factor_dict[k], 76543217)
    answer *= mult
    answer = answer % 76543217
print(answer)
| gpl-3.0 |
boooka/GeoPowerOff | venv/lib/python2.7/site-packages/django/db/backends/mysql/creation.py | 70 | 3036 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
    """MySQL-specific database/schema creation behaviour for Django."""
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BinaryField': 'longblob',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
    }
    def sql_table_creation_suffix(self):
        # Append CHARACTER SET / COLLATE clauses from the TEST settings, if set.
        suffix = []
        test_settings = self.connection.settings_dict['TEST']
        if test_settings['CHARSET']:
            suffix.append('CHARACTER SET %s' % test_settings['CHARSET'])
        if test_settings['COLLATION']:
            suffix.append('COLLATE %s' % test_settings['COLLATION'])
        return ' '.join(suffix)
    def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
        "All inline references are pending under MySQL"
        return [], True
    def sql_destroy_indexes_for_fields(self, model, fields, style):
        """Return the DROP INDEX statement for a (possibly multi-column) index."""
        if len(fields) == 1 and fields[0].db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
        elif model._meta.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
        else:
            tablespace_sql = ""
        if tablespace_sql:
            tablespace_sql = " " + tablespace_sql
        field_names = []
        qn = self.connection.ops.quote_name
        for f in fields:
            field_names.append(style.SQL_FIELD(qn(f.column)))
        # Index name mirrors the naming used at creation time: table + digest.
        index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
        from ..utils import truncate_name
        return [
            style.SQL_KEYWORD("DROP INDEX") + " " +
            style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
            style.SQL_KEYWORD("ON") + " " +
            style.SQL_TABLE(qn(model._meta.db_table)) + ";",
        ]
| apache-2.0 |
asacamano/keyczar | cpp/src/tools/swtoolkit/test/visual_studio_solution_test.py | 18 | 3689 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Visual studio solution test (MEDIUM test)."""
import sys
import TestFramework
def TestSConstruct(scons_globals):
  """Test SConstruct file.

  Args:
    scons_globals: Global variables dict from the SConscript file.

  NOTE: this function's source is written out as an SConstruct file (see
  main()) and executed by SCons; names like BuildComponents are presumably
  provided by the SCons/swtoolkit runtime at that point -- not available
  when this module itself is imported.
  """
  # Get globals from SCons
  Environment = scons_globals['Environment']
  base_env = Environment(tools=['component_setup'])
  base_env.Append(BUILD_COMPONENTS=['SConscript'])
  windows_env = base_env.Clone(
      tools=['target_platform_windows', 'visual_studio_solution'],
      BUILD_TYPE='dbg',
      BUILD_TYPE_DESCRIPTION='Debug Windows build',
  )
  windows_env.Append(BUILD_GROUPS=['default'])
  BuildComponents([windows_env])
  # Solution and target projects
  s = windows_env.Solution('test_sln', [windows_env])
  windows_env.Alias('solution', s)
sconscript_contents = """
Import('env')
env.ComponentProgram('hello', 'hello.c')
env.ComponentLibrary('foo', 'foo.c')
"""
hello_c_contents = """
#include <stdio.h>
int main() {
printf("Hello, world!\\n");
return 0;
}
"""
foo_c_contents = """
int test(int a, int b) {
return a + b;
}
"""
expect_stdout = """scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
Adding 'test_sln - dbg|Win32' to 'test_sln.vcproj'
Adding 'test_sln - dbg|Win32' to 'test_sln.sln'
scons: done building targets.
"""
def main():
  """Set up a scratch project, run `scons solution`, and verify that the
  Visual Studio solution and project files are generated."""
  test = TestFramework.TestFramework()
  # Test only applies to Windows
  if sys.platform not in ('win32', 'cygwin'):
    test.skip_test('This test only applies to windows.\n')
    return
  base = 'hello/'
  test.subdir(base)
  # Write TestSConstruct's source out as the project's SConstruct file.
  test.WriteSConscript(base + 'SConstruct', TestSConstruct)
  test.write(base + 'SConscript', sconscript_contents)
  test.write(base + 'hello.c', hello_c_contents)
  test.write(base + 'foo.c', foo_c_contents)
  test.subdir(base + 'bar')
  test.write(base + 'bar/bar.cpp', foo_c_contents)
  test.run(chdir=base, options='solution', stdout=expect_stdout)
  # Check that all solutions and projects were generated.
  test.must_exist(base + 'test_sln.sln')
  test.must_exist(base + 'test_sln.vcproj')
  test.pass_test()
if __name__ == '__main__':
  main()
| apache-2.0 |
ioam/topographica | topo/tests/reference/common_control.py | 3 | 3736 | ### This file can't be used on its own (see e.g. lissom_or_reference)
### NOTE: c++ lissom does not output unsituated weights, so a function
### in lissom_log_parser guesses how to unsituate the weights. If your
### weights contains rows or columns of zeros, this guessing will fail.
from topo.tests.reference.lissom_log_parser import check_weights,check_activities,check_size
from math import ceil
def _check_proj(s,p,N):
    # Compare projection p of sheet s against the C++ lissom reference,
    # sampling units on the same grid as save_all_units.command.
    # Returns 0 on success, or an error string on the first mismatch.
    # to match save_all_units.command
    step = int(ceil(N/20.0))
    if step>2 and step%2==1:
        step+=1
    # check all sizes
    try:
        for i in range(0,N,step):
            for j in range(0,N,step):
                check_size(s,p,(i,j),display=verbose)
    except AssertionError, st:
        return "%s: %s\n"%(s,st)
    try:
        for i in range(0,N,step):
            for j in range(0,N,step):
                check_weights(s,p,(i,j),display=verbose)
        return 0
    except AssertionError, st:
        return "%s: %s\n"%(s,st)
def check(weights=True,activities=True):
    # Run the requested comparisons, collecting both failure reports so a
    # weights mismatch does not mask an activities mismatch; raise once at
    # the end with the combined message.
    errs = ""
    if weights:
        try:
            check_all_weights()
        except AssertionError,we:
            errs+=we.args[0]+"\n"
    if activities:
        try:
            check_all_activities()
        except AssertionError,ae:
            errs+=ae.args[0]+"\n"
    if len(errs)>0:
        raise AssertionError("\n"+errs)
def check_all_weights():
    # Verify every incoming projection of the 'Primary' sheet against the
    # reference data; accumulate per-projection error strings.
    print "t=%s: Checking weights..."%topo.sim.time()
    e = ""
    # assumes 'Primary'
    for proj in topo.sim['Primary'].projections():
        print "...%s"%proj
        o =_check_proj('Primary',proj,BaseN)
        if o!=0:e+=o
    if len(e)>0:
        raise AssertionError("The following weights did not match:\n%s"%e)
def check_all_activities():
    # Compare the activity matrix of every sheet (in precedence order)
    # against the reference log; accumulate failures and raise once.
    print "t=%s: Checking activities..."%topo.sim.time()
    sheets = sorted(topo.sim.objects().values(), cmp=lambda x, y:
                    cmp(x.precedence,
                        y.precedence))
    errs = ""
    for s in sheets:
        print "...%s"%s.name
        try:
            check_activities(s.name,display=verbose)
        except AssertionError, st:
            errs+=st.args[0]+"\n"
    if len(errs)>0:
        raise AssertionError("The following activities did not match:\n%s"%errs)
##     try:
##         check_activities(s.name,display=verbose)
##     except AssertionError, st:
##         prjns = sorted(topo.sim[s.name].in_connections)[::-1]
##         e = ""
##         for pr in prjns:
##             print "Checking %s."%pr.name
##             o =_check_proj(s.name,pr.name,BaseN)
##             if o!=0:e+=o
##         raise AssertionError("%s (If any incoming projection did not match, it will be listed below.)\n%s\n"%(st,e))
# hack
# Capture this module's namespace so run_comparisons can inject the caller's
# globals (BaseN, verbose, stop_at_1000, ...) into it before checking.
L = locals()
def run_comparisons(l):
    # Advance the simulation in the same schedule as the C++ reference run
    # and compare state at each checkpoint.
    # * times mark scheduled actions
    L.update(l)
    check(activities=False) #0 *
    for i in range(5):
        topo.sim.run(1)
        check()
    topo.sim.run(95)  #100
    check()
    topo.sim.run(98)  #198
    check()
    topo.sim.run(2)   #200 *
    check()
    topo.sim.run(150) #350
    check()
    topo.sim.run(150) #500 *
    check()
    topo.sim.run(300) #800
    check()
    topo.sim.run(200) # 1000 *
    check()
    # CB: this stop_at_1000 stuff is a temporary hack; when topographica's
    # faster, I'm not going to need it.
    if not stop_at_1000:
        for i in range(4): # to 5000 *
            topo.sim.run(1000)
            check()
        topo.sim.run(1500) # 6500 *
        check()
        topo.sim.run(1500) # 8000 *
        check()
        topo.sim.run(5000) # 13000
        check()
        topo.sim.run(3000) # 16000
        check()
        topo.sim.run(4000) # 20000 *
        check()
| bsd-3-clause |
Praxyk/Praxyk-Clients | libs/python/praxyk/paginated.py | 2 | 5431 | #!/usr/bin/env python
## @auth John Allard, Nick Church, others
## @date Oct 2015
## @github https://github.com/jhallard/praxyk
## @license MIT
import os, sys, json, requests
import subprocess, argparse, getpass, urlparse
import datetime as dt
from praxyk_exception import PraxykException
from base import PraxykBase
# @info - This is the base class for all other classes in the Praxyk python library.
# It serves to hold data and functions that are common to all classes of the
# library. Examples of this are base url's for routing, pre-set http headers,
# and functions for return error checking. We also hold user auth info
class Paginated(PraxykBase) :
    """Mixin over PraxykBase.get() that tracks pagination state (current,
    first/last/next/prev page numbers) parsed from the API's page links."""
    def __init__(self, pagination=None, first_page_num=None, last_page_num=None, page=None,
                 prev_page_num=None, next_page_num=None, page_size=None, *args, **kwargs) :
        super(Paginated, self).__init__(*args, **kwargs)
        self.next_page_num = next_page_num
        self.prev_page_num = prev_page_num
        self.last_page_num = last_page_num
        self.first_page_num = first_page_num
        self.page_size = page_size
        self.pagination = pagination
        self.page = page
    def get(self, url, payload, pagination=None, page_size=None, page=None, *args, **kwargs) :
        # Merge any per-call pagination overrides into instance state, add
        # them to the request payload, then parse the page links out of the
        # response into first/last/next/prev page numbers.
        if pagination : self.pagination = pagination
        if page_size : self.page_size = page_size
        if page : self.page = page
        if self.page_size : payload['page_size'] = self.page_size
        if self.page : payload['page'] = self.page
        if self.pagination : payload['pagination'] = self.pagination
        try :
            response = super(Paginated, self).get(url, payload, *args, **kwargs)
            if response :
                # Each *_page link carries the target page number as a query
                # parameter; get_params_from_url returns lists of values.
                self.next_page_num = self.get_params_from_url(response.get('next_page', "")).get('page', None)
                self.prev_page_num = self.get_params_from_url(response.get('prev_page', "")).get('page', None)
                self.first_page_num = self.get_params_from_url(response.get('first_page', "")).get('page', None)
                self.last_page_num = self.get_params_from_url(response.get('last_page', "")).get('page', None)
                self.next_page_num = int(self.next_page_num[0]) if self.next_page_num else None
                self.prev_page_num = int(self.prev_page_num[0]) if self.prev_page_num else None
                self.first_page_num = int(self.first_page_num[0]) if self.first_page_num else None
                self.last_page_num = int(self.last_page_num[0]) if self.last_page_num else None
                self.page = response.get('page', {}).get('page_number', page)
                return response
        except Exception as e :
            print str(e)
            raise PraxykException(message="GET Request Failed in Paginated Class. URL (%s)" % url)
        return None
    # @info - these next four functions can be used after a page of results has already been obtained via the get function.
    #         When that function is called, the results returned contain links to the next page, prev page, first page,
    #         and last page of the transactions. We store those page numbers and make them accessable via these functions, ex:
    #         tr = Transactions(user_id=45, auth_token=XXXX); tr.get(); trans_1 = tr.transactions; tr.next_page(); trans_2 = tr.transactions
    # NOTE(review): all four navigation methods below call self.get() with no
    # arguments, but get() declares required positional parameters (url,
    # payload), and the `payload` built here is never passed -- this looks like
    # it would raise TypeError unless a subclass overrides get() with a
    # no-argument signature.  Verify against the subclasses before relying on
    # these methods.
    def next_page(self) :
        payload = {'token' : self.auth_token}
        try :
            if self.next_page_num:
                self.page = int(self.next_page_num)
                self.pagination = True
                return self.get()
        except Exception as e :
            sys.stderr.write(str(e))
        return None
    def prev_page(self) :
        payload = {'token' : self.auth_token}
        try :
            if self.prev_page_num:
                self.page = int(self.prev_page_num)
                self.pagination = True
                return self.get()
        except Exception as e :
            sys.stderr.write(str(e))
        return None
    def last_page(self) :
        payload = {'token' : self.auth_token}
        try :
            if self.last_page_num:
                self.page = int(self.last_page_num)
                self.pagination = True
                return self.get()
        except Exception as e :
            sys.stderr.write(str(e))
        return None
    def first_page(self) :
        payload = {'token' : self.auth_token}
        try :
            if self.first_page_num :
                self.page = int(self.first_page_num)
                self.pagination = True
                return self.get()
        except Exception as e :
            sys.stderr.write(str(e))
        return None
    def to_dict(self) :
        # Extend the base serialization with the pagination state.
        base_dict = super(Paginated, self).to_dict()
        updated = {
            'page' : self.page,
            'pagination' : self.pagination,
            'page_size' : self.page_size,
            'next_page' : self.next_page_num,
            'prev_page' : self.prev_page_num,
            'last_page' : self.last_page_num,
            'first_page' : self.first_page_num
        }
        base_dict.update(updated)
        return base_dict
| mit |
jessefeinman/FintechHackathon | venv/Lib/encodings/cp720.py | 270 | 13686 | """Python Character Mapping Codec cp720 generated on Windows:
Vista 6.0.6002 SP2 Multiprocessor Free with the command:
python Tools/unicode/genwincodec.py 720
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp720 codec backed by the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        # One-pass table lookup: maps each character through encoding_table.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # One-pass table lookup: maps each byte through decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp720 encoder; charmap codecs need no cross-call state."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp720 decoder; charmap codecs need no cross-call state."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for cp720; inherits encode() from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for cp720; inherits decode() from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo used to register the 'cp720' codec."""
    return codecs.CodecInfo(
        name='cp720',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\x80'
'\x81'
'\xe9' # 0x82 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x83 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\x84'
'\xe0' # 0x85 -> LATIN SMALL LETTER A WITH GRAVE
'\x86'
'\xe7' # 0x87 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x88 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x89 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x8A -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x8B -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x8C -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\x8d'
'\x8e'
'\x8f'
'\x90'
'\u0651' # 0x91 -> ARABIC SHADDA
'\u0652' # 0x92 -> ARABIC SUKUN
'\xf4' # 0x93 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xa4' # 0x94 -> CURRENCY SIGN
'\u0640' # 0x95 -> ARABIC TATWEEL
'\xfb' # 0x96 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x97 -> LATIN SMALL LETTER U WITH GRAVE
'\u0621' # 0x98 -> ARABIC LETTER HAMZA
'\u0622' # 0x99 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0x9A -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0x9B -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\xa3' # 0x9C -> POUND SIGN
'\u0625' # 0x9D -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0x9F -> ARABIC LETTER ALEF
'\u0628' # 0xA0 -> ARABIC LETTER BEH
'\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xA2 -> ARABIC LETTER TEH
'\u062b' # 0xA3 -> ARABIC LETTER THEH
'\u062c' # 0xA4 -> ARABIC LETTER JEEM
'\u062d' # 0xA5 -> ARABIC LETTER HAH
'\u062e' # 0xA6 -> ARABIC LETTER KHAH
'\u062f' # 0xA7 -> ARABIC LETTER DAL
'\u0630' # 0xA8 -> ARABIC LETTER THAL
'\u0631' # 0xA9 -> ARABIC LETTER REH
'\u0632' # 0xAA -> ARABIC LETTER ZAIN
'\u0633' # 0xAB -> ARABIC LETTER SEEN
'\u0634' # 0xAC -> ARABIC LETTER SHEEN
'\u0635' # 0xAD -> ARABIC LETTER SAD
'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0xB0 -> LIGHT SHADE
'\u2592' # 0xB1 -> MEDIUM SHADE
'\u2593' # 0xB2 -> DARK SHADE
'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0xB5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0xB6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0xB8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0xBD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0xBE -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0xC6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0xC7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0xCF -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0xD0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0xD1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0xD2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0xD3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0xD4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0xD5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0xD6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0xD7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0xD8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0xDB -> FULL BLOCK
'\u2584' # 0xDC -> LOWER HALF BLOCK
'\u258c' # 0xDD -> LEFT HALF BLOCK
'\u2590' # 0xDE -> RIGHT HALF BLOCK
'\u2580' # 0xDF -> UPPER HALF BLOCK
'\u0636' # 0xE0 -> ARABIC LETTER DAD
'\u0637' # 0xE1 -> ARABIC LETTER TAH
'\u0638' # 0xE2 -> ARABIC LETTER ZAH
'\u0639' # 0xE3 -> ARABIC LETTER AIN
'\u063a' # 0xE4 -> ARABIC LETTER GHAIN
'\u0641' # 0xE5 -> ARABIC LETTER FEH
'\xb5' # 0xE6 -> MICRO SIGN
'\u0642' # 0xE7 -> ARABIC LETTER QAF
'\u0643' # 0xE8 -> ARABIC LETTER KAF
'\u0644' # 0xE9 -> ARABIC LETTER LAM
'\u0645' # 0xEA -> ARABIC LETTER MEEM
'\u0646' # 0xEB -> ARABIC LETTER NOON
'\u0647' # 0xEC -> ARABIC LETTER HEH
'\u0648' # 0xED -> ARABIC LETTER WAW
'\u0649' # 0xEE -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEF -> ARABIC LETTER YEH
'\u2261' # 0xF0 -> IDENTICAL TO
'\u064b' # 0xF1 -> ARABIC FATHATAN
'\u064c' # 0xF2 -> ARABIC DAMMATAN
'\u064d' # 0xF3 -> ARABIC KASRATAN
'\u064e' # 0xF4 -> ARABIC FATHA
'\u064f' # 0xF5 -> ARABIC DAMMA
'\u0650' # 0xF6 -> ARABIC KASRA
'\u2248' # 0xF7 -> ALMOST EQUAL TO
'\xb0' # 0xF8 -> DEGREE SIGN
'\u2219' # 0xF9 -> BULLET OPERATOR
'\xb7' # 0xFA -> MIDDLE DOT
'\u221a' # 0xFB -> SQUARE ROOT
'\u207f' # 0xFC -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0xFD -> SUPERSCRIPT TWO
'\u25a0' # 0xFE -> BLACK SQUARE
'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
tcmitchell/geni-ch | plugins/chapiv1rpc/chapi/DelegateBase.py | 2 | 4206 | #----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Base class for delegate bases that want to authenticate, authorize,
# Return GENI-style returns
import tools.pluginmanager as pm
# from amsoil.config import expand_amsoil_path
from exceptions import *
from Exceptions import *
import traceback
import gcf.geni.util.cred_util
class DelegateBase(object):
    """Base class for clearinghouse delegates that authenticate callers
    via GENI credentials and assemble GENI-style return structures."""

    def __init__(self, logger):
        self.logger = logger

    def auth(self, client_cert, credentials, slice_urn=None, privileges=()):
        """Authenticate and authorize the calling client.

        client_cert -- PEM client SSL certificate (may be None in debug mode)
        credentials -- list of credential dicts ('geni_type'/'geni_value')
        slice_urn   -- optional slice the privileges apply to
        privileges  -- tuple of required privileges

        Returns (user_urn, user_uuid, user_email) of the authenticated caller.
        Raises CHAPIv1ForbiddenError when verification fails.
        """
        # check variables
        if not isinstance(privileges, tuple):
            raise TypeError("Privileges need to be a tuple.")
        # collect credentials (only GENI certs, version ignored)
        geni_credentials = []
        for c in credentials:
            if c['geni_type'] == 'geni_sfa':
                geni_credentials.append(c['geni_value'])
        # get the cert_root
        config = pm.getService("config")
        # cert_root = expand_amsoil_path(config.get("chapiv1rpc.ch_cert_root"))
        cert_root = config.get("chapiv1rpc.ch_cert_root")
        if client_cert is None:
            # work around if the certificate could not be acquired due to
            # the shortcomings of the werkzeug library
            if config.get("flask.debug"):
                import gcf.sfa.trust.credential as cred
                client_cert = cred.Credential(string=geni_credentials[0]).gidCaller.save_to_string(save_parents=True)
            else:
                raise CHAPIv1ForbiddenError("Could not determine the client SSL certificate")
        # test the credential
        try:
            # BUGFIX: use the module actually imported at file scope
            # (gcf.geni.util.cred_util); the former 'gcf.geni.cred_util'
            # attribute path does not exist and raised AttributeError.
            cred_verifier = gcf.geni.util.cred_util.CredentialVerifier(cert_root)
            cred_verifier.verify_from_strings(client_cert, geni_credentials, slice_urn, privileges)
        except Exception as e:
            raise CHAPIv1ForbiddenError(str(e))
        # BUGFIX: 'gid' was previously an undefined name here (NameError);
        # import the GID helper explicitly.
        # NOTE(review): module path taken from gcf's sfa trust package —
        # confirm against the installed gcf version.
        import gcf.sfa.trust.gid as gid
        user_gid = gid.GID(string=client_cert)
        user_urn = user_gid.get_urn()
        user_uuid = user_gid.get_uuid()
        user_email = user_gid.get_email()
        return user_urn, user_uuid, user_email  # TODO document return

    def _errorReturn(self, e):
        """Assembles a GENI compliant return result for faulty methods."""
        if not isinstance(e, CHAPIv1BaseError):  # convert common errors into CHAPIv1GeneralError
            e = CHAPIv1ServerError(str(e))
        # do some logging
        self.logger.error(e)
        self.logger.error(traceback.format_exc())
        return {'code' : e.code , 'value' : None, 'output' : str(e) }

    def _successReturn(self, result):
        """Assembles a GENI compliant return result for successful methods."""
        return { 'code' : 0 , 'value' : result, 'output' : '' }

    def subcall_options(self, options):
        """Generate options dictionary for subordinate calls to other
        clearinghouse services.

        Only forwards the keys relevant to speaks-for authorization.
        """
        sopt = dict()
        sfkeys = ['ENVIRON', 'speaking_for']
        for sfkey in sfkeys:
            if sfkey in options:
                sopt[sfkey] = options[sfkey]
        return sopt
| mit |
aschn/goodtechgigs | docs/conf.py | 1 | 7831 | # -*- coding: utf-8 -*-
#
# goodtechgigs documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'goodtechgigs'
copyright = u"2015, Anna Schneider"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'goodtechgigsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'goodtechgigs.tex',
u'goodtechgigs Documentation',
u"Anna Schneider", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'goodtechgigs', u'goodtechgigs Documentation',
[u"Anna Schneider"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'goodtechgigs', u'goodtechgigs Documentation',
u"Anna Schneider", 'goodtechgigs',
'Tech gigs for good.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| apache-2.0 |
sergiusens/snapcraft | snapcraft/storeapi/assertions.py | 3 | 9865 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import subprocess
from copy import deepcopy
from datetime import datetime
from . import StoreClient
from . import errors
from . import constants
class _BaseAssertion:
    """Private Base class to handle assertions.

    Implementations are supposed to define a class attribute to determine
    the assertion type.

    :cvar str _assertion_type: the assertion type, also treated as the endpoint
                               for the assertion on the store and the payload
                               header key for the returned data.
    """

    @property
    def publisher_id(self):
        """Return the publisher-id of a snap.

        This entry is also known as account-id or developer-id.
        The value is lazily fetched from the store.
        """
        if not self._account_info:
            self._account_info = self._store_client.get_account_information()
        return self._account_info["account_id"]

    @property
    def snap_id(self):
        """Return the snap-id of the given snap_name.

        The value is lazily fetched from the store.
        """
        if not self._account_info:
            self._account_info = self._store_client.get_account_information()
        snaps = self._account_info["snaps"][self._release]
        try:
            return snaps[self._snap_name]["snap-id"]
        except KeyError:
            # snap_name not registered under this account/release
            raise errors.SnapNotFoundError(self._snap_name)

    def __init__(self, *, snap_name, signing_key=None):
        """Create an instance to handle an assertion.

        :param str snap_name: snap name to handle assertion for.
        :param str signing_key: the name of the key to use, if not
                                provided, the default key is used.
        :ivar dict assertion: holds the actual assertion.
        :ivar dict signed_assertion: holds a signed version of assertion.
        """
        self._store_client = StoreClient()
        self._snap_name = snap_name
        self._signing_key = signing_key
        self._release = constants.DEFAULT_SERIES
        # Account info (and thus publisher_id/snap_id) is fetched lazily.
        self._account_info = None
        self.assertion = None
        self.signed_assertion = None

    def get(self):
        """Get the current assertion from the store.

        :returns: the assertion corresponding to the snap_name.
        :rtype: dict
        """
        # The store adds a header key which is not consistent with the endpoint
        # which we need to pop as it is not understood by snap sign.
        # (That is why only the first value of the returned mapping is kept
        # below, discarding the wrapping header key.)
        if self.assertion:
            return self.assertion
        store_assertion = self._store_client.get_assertion(
            self.snap_id, self._assertion_type
        )
        self.assertion = list(store_assertion.values())[0]
        return self.assertion

    def sign(self):
        """Create a signed version of the obtained assertion.

        Shells out to `snap sign`, feeding the assertion as JSON on stdin.

        :returns: signed assertion document.
        :raises errors.StoreAssertionError: if `snap sign` fails.
        """
        cmdline = ["snap", "sign"]
        if self._signing_key:
            cmdline += ["-k", self._signing_key]
        snap_sign = subprocess.Popen(
            cmdline,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data = json.dumps(self.assertion).encode("utf8")
        assertion, err = snap_sign.communicate(input=data)
        if snap_sign.returncode != 0:
            err = err.decode("ascii", errors="replace")
            raise errors.StoreAssertionError(
                snap_name=self._snap_name, endpoint=self._assertion_type, error=err
            )
        self.signed_assertion = assertion
        return assertion

    def push(self, force=False):
        """Push the assertion to the store, signing if necessary.

        :param bool force: if True, ignore any conflict with revoked developers
                           and the snap revisions it would invalidate.
        :returns: None
        """
        if not self.signed_assertion:
            self.sign()
        self._store_client.push_assertion(
            self.snap_id, self.signed_assertion, self._assertion_type, force=force
        )
class DeveloperAssertion(_BaseAssertion):
    """Implementation of a developer assertion.

    The assertion is used to enable collaboration for a given snap_name
    by updating a snap-id's developer assertion using the store endpoint.

    The assertion content has the following structure

    {
        'type': 'snap-developer',
        'authority-id': '<account_id of the publisher or store authority-id>',
        'publisher-id': '<account_id of the publisher>',
        'snap-id': '<snap-id>',
        'developers': [{
            'developer-id': '<account-id>',
            'since': '2017-02-10T08:35:00.390258Z'
        },{
            'developer-id': '<account-id>',
            'since': '2017-02-10T08:35:00.390258Z',
            'until': '2018-02-10T08:35:00.390258Z'
        }],
    }
    """

    _assertion_type = "developers"

    def new_assertion(self, *, developers):
        """Create a new assertion with developers based out of the current one.

        The new assertion has its assertion's authority-id normalized to the
        assertion's publisher_id and the assertion's revision increased by 1.

        :param list developers: a list of a dictionary developers holding the
                                keys: developer_id (mandatory), since
                                (mandatory) and until (optional).
        :returns: a new assertion based out of the current assertion with the
                  provided developers list.
        :rtype: DeveloperAssertion
        """
        new_assertion = deepcopy(self.assertion)
        new_assertion["developers"] = developers
        # The revision should be incremented, to avoid `invalid-revision`
        # errors.
        # BUGFIX: the previous code converted the revision without adding 1,
        # so the revision was never actually incremented (a fresh assertion
        # with no revision now correctly starts at "0").
        new_assertion["revision"] = str(int(self.assertion.get("revision", "-1")) + 1)
        # There is a possibility that the `authority-id` to be `canonical`,
        # which should be changed to the `publisher_id` to match the signing
        # key.
        new_assertion["authority-id"] = self.publisher_id

        new_instance = DeveloperAssertion(
            snap_name=self._snap_name, signing_key=self._signing_key
        )
        # Reference the already fetched information
        new_instance._account_info = self._account_info
        new_instance.assertion = new_assertion
        return new_instance

    def get(self):
        """Return a dict containing the developer assertion for snap_name.

        The data that comes from the store query looks as follows:

        {'snap_developer': {
            'type': 'snap-developer',
            'authority-id': <account_id of the publisher or
                             store authority-id>,
            'publisher-id': <account_id of the publisher>,
            'snap-id': 'snap_id',
            'developers': [{
                'developer-id': 'account_id of dev-1',
                'since': '2017-02-10T08:35:00.390258Z'
            },{
                'developer-id': 'account_id of dev-2',
                'since': '2017-02-10T08:35:00.390258Z',
                'until': '2018-02-10T08:35:00.390258Z'
            }],
        }
        }

        The assertion is saved without the snap_developer header.

        :returns: the latest developer assertion corresponding to snap_name.
        :rtype: dict
        """
        try:
            self.assertion = super().get()
        except errors.StoreValidationError as e:
            # A missing assertion is not an error: synthesize an empty one.
            if e.error_list[0]["code"] != "snap-developer-not-found":
                raise
            self.assertion = {
                "type": "snap-developer",
                "authority-id": self.publisher_id,
                "publisher-id": self.publisher_id,
                "snap-id": self.snap_id,
                "developers": [],
            }
        # A safeguard to operate easily on the assertion
        if "developers" not in self.assertion:
            self.assertion["developers"] = []
        return self.assertion

    def get_developers(self):
        """Return a copy of the current developers listed in the assertion.

        :returns: a list of developers.
        :rtype: list
        """
        return self.get()["developers"].copy()

    def is_equal(self, other_assertion):
        """Determine equality of developer lists in each assertion.

        During the comparison, differences in milliseconds are not considered.

        :returns: Return True if the list of developers in this instances
                  assertion list is equal to the list from other_assertion.
        :rtype: bool
        """
        this_devs = self._normalize_time(self.assertion["developers"].copy())
        other_devs = self._normalize_time(
            other_assertion.assertion["developers"].copy()
        )
        return this_devs == other_devs

    def _normalize_time(self, developers):
        # Zero out sub-second precision so timestamp comparisons ignore
        # millisecond differences introduced by the store.
        for dev in developers:
            for range_ in ["since", "until"]:
                if range_ in dev:
                    date = datetime.strptime(dev[range_], "%Y-%m-%dT%H:%M:%S.%fZ")
                    dev[range_] = date.strftime("%Y-%m-%dT%H:%M:%S.000000Z")
        return developers
| gpl-3.0 |
captainpete/rethinkdb | external/gtest_1.6.0/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform detection: color handling differs on Windows, where TERM is
# not consulted.
# BUG FIX: this previously read `os.name = 'nt'`, a chained assignment that
# *overwrote* os.name with 'nt' on every platform (and made IS_WINDOWS
# always truthy).  The comparison operator is what was intended.
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'  # environment variable Google Test reads
COLOR_FLAG = 'gtest_color'     # command-line flag Google Test understands
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Remove the variable if present; a missing variable is a no-op.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  # Stage the environment the child process will observe.
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  extra_args = []
  if color_flag is not None:
    extra_args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  proc = gtest_test_utils.Subprocess([COMMAND] + extra_args)
  # A crashed child (not exited) counts as truthy, otherwise the exit code.
  return not proc.exited or proc.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Verifies Google Test's decision of whether to colorize its output."""

  def _AssertColored(self, term, env, flag):
    """Asserts that the test binary uses colors for the given settings."""
    self.assert_(UsesColor(term, env, flag))

  def _AssertMonochrome(self, term, env, flag):
    """Asserts that the test binary does NOT use colors for the settings."""
    self.assert_(not UsesColor(term, env, flag))

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      for term in ('dumb', 'emacs', 'xterm-mono', 'unknown', None):
        self._AssertMonochrome(term, None, None)
    for term in ('linux', 'cygwin', 'xterm', 'xterm-color', 'xterm-256color'):
      self._AssertColored(term, None, None)

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self._AssertMonochrome('dumb', None, 'no')
    self._AssertMonochrome('xterm-color', None, 'no')
    if not IS_WINDOWS:
      self._AssertMonochrome('emacs', None, 'auto')
      self._AssertColored('xterm', None, 'auto')
    self._AssertColored('dumb', None, 'yes')
    self._AssertColored('xterm', None, 'yes')

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self._AssertMonochrome('dumb', 'no', None)
    self._AssertMonochrome('xterm-color', 'no', None)
    if not IS_WINDOWS:
      self._AssertMonochrome('dumb', 'auto', None)
      self._AssertColored('xterm-color', 'auto', None)
    self._AssertColored('dumb', 'yes', None)
    self._AssertColored('xterm-color', 'yes', None)

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    self._AssertMonochrome('xterm-color', 'no', 'no')
    self._AssertColored('dumb', 'no', 'yes')
    self._AssertColored('xterm-color', 'no', 'auto')

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    for alias in ('true', 'YES', 'T', '1'):
      self._AssertColored('dumb', None, alias)
    for alias in ('f', 'false', '0', 'unknown'):
      self._AssertMonochrome('xterm', None, alias)
# Entry point: delegate to gtest's wrapper around unittest discovery/run.
if __name__ == '__main__':
  gtest_test_utils.Main()
| agpl-3.0 |
gmist/kay-ru | kay/lib/babel/dates.py | 6 | 39600 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Locale dependent formatting and parsing of dates and times.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_TIME``,
* ``LC_ALL``, and
* ``LANG``
"""
from __future__ import division
from datetime import date, datetime, time, timedelta, tzinfo
import re
from babel.core import default_locale, get_global, Locale
from babel.util import UTC
__all__ = ['format_date', 'format_datetime', 'format_time', 'format_timedelta',
'get_timezone_name', 'parse_date', 'parse_datetime', 'parse_time']
__docformat__ = 'restructuredtext en'
# Default locale for every function in this module, resolved from the
# LC_TIME / LC_ALL / LANG environment variables (in that order).
LC_TIME = default_locale('LC_TIME')
# Aliases for use in scopes where the modules are shadowed by local variables
date_ = date
datetime_ = datetime
time_ = time
def get_period_names(locale=LC_TIME):
    """Return the day-period (AM/PM) names for the given locale.
    >>> get_period_names(locale='en_US')['am']
    u'AM'
    :param locale: a `Locale` object or locale identifier
    :return: mapping of period ids to localized names
    :rtype: `dict`
    """
    parsed = Locale.parse(locale)
    return parsed.periods
def get_day_names(width='wide', context='format', locale=LC_TIME):
    """Return the localized weekday names for the requested width/context.
    >>> get_day_names('wide', locale='en_US')[1]
    u'Tuesday'
    >>> get_day_names('abbreviated', locale='es')[1]
    u'mar'
    >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1]
    u'D'
    :param width: one of "wide", "abbreviated" or "narrow"
    :param context: either "format" or "stand-alone"
    :param locale: a `Locale` object or locale identifier
    :return: mapping of weekday numbers to names
    :rtype: `dict`
    """
    day_names = Locale.parse(locale).days
    return day_names[context][width]
def get_month_names(width='wide', context='format', locale=LC_TIME):
    """Return the localized month names for the requested width/context.
    >>> get_month_names('wide', locale='en_US')[1]
    u'January'
    >>> get_month_names('abbreviated', locale='es')[1]
    u'ene'
    >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1]
    u'J'
    :param width: one of "wide", "abbreviated" or "narrow"
    :param context: either "format" or "stand-alone"
    :param locale: a `Locale` object or locale identifier
    :return: mapping of month numbers (1-based) to names
    :rtype: `dict`
    """
    month_names = Locale.parse(locale).months
    return month_names[context][width]
def get_quarter_names(width='wide', context='format', locale=LC_TIME):
    """Return the localized quarter names for the requested width/context.
    >>> get_quarter_names('wide', locale='en_US')[1]
    u'1st quarter'
    >>> get_quarter_names('abbreviated', locale='de_DE')[1]
    u'Q1'
    :param width: one of "wide", "abbreviated" or "narrow"
    :param context: either "format" or "stand-alone"
    :param locale: a `Locale` object or locale identifier
    :return: mapping of quarter numbers (1-based) to names
    :rtype: `dict`
    """
    quarter_names = Locale.parse(locale).quarters
    return quarter_names[context][width]
def get_era_names(width='wide', locale=LC_TIME):
    """Return the localized era names (BC/AD etc.) for the given width.
    >>> get_era_names('wide', locale='en_US')[1]
    u'Anno Domini'
    >>> get_era_names('abbreviated', locale='de_DE')[1]
    u'n. Chr.'
    :param width: one of "wide", "abbreviated" or "narrow"
    :param locale: a `Locale` object or locale identifier
    :return: mapping of era indices to names
    :rtype: `dict`
    """
    era_names = Locale.parse(locale).eras
    return era_names[width]
def get_date_format(format='medium', locale=LC_TIME):
    """Return the locale's date pattern for the named format.
    >>> get_date_format(locale='en_US')
    <DateTimePattern u'MMM d, y'>
    >>> get_date_format('full', locale='de_DE')
    <DateTimePattern u'EEEE, d. MMMM y'>
    :param format: one of "full", "long", "medium" or "short"
    :param locale: a `Locale` object or locale identifier
    :return: the date format pattern
    :rtype: `DateTimePattern`
    """
    formats = Locale.parse(locale).date_formats
    return formats[format]
def get_datetime_format(format='medium', locale=LC_TIME):
    """Return the locale's combined date+time pattern for the named format.
    >>> get_datetime_format(locale='en_US')
    u'{1} {0}'
    :param format: one of "full", "long", "medium" or "short"
    :param locale: a `Locale` object or locale identifier
    :return: the datetime format pattern
    :rtype: `unicode`
    """
    patterns = Locale.parse(locale).datetime_formats
    if format in patterns:
        return patterns[format]
    # Fall back to the locale's generic (keyless) combination pattern.
    return patterns[None]
def get_time_format(format='medium', locale=LC_TIME):
    """Return the locale's time pattern for the named format.
    >>> get_time_format(locale='en_US')
    <DateTimePattern u'h:mm:ss a'>
    >>> get_time_format('full', locale='de_DE')
    <DateTimePattern u'HH:mm:ss zzzz'>
    :param format: one of "full", "long", "medium" or "short"
    :param locale: a `Locale` object or locale identifier
    :return: the time format pattern
    :rtype: `DateTimePattern`
    """
    formats = Locale.parse(locale).time_formats
    return formats[format]
def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME):
    """Return the timezone associated with the given `datetime` object formatted
    as string indicating the offset from GMT.
    >>> dt = datetime(2007, 4, 1, 15, 30)
    >>> get_timezone_gmt(dt, locale='en')
    u'GMT+00:00'
    >>> from pytz import timezone
    >>> tz = timezone('America/Los_Angeles')
    >>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz)
    >>> get_timezone_gmt(dt, locale='en')
    u'GMT-08:00'
    >>> get_timezone_gmt(dt, 'short', locale='en')
    u'-0800'
    The long format depends on the locale, for example in France the acronym
    UTC string is used instead of GMT:
    >>> get_timezone_gmt(dt, 'long', locale='fr_FR')
    u'UTC-08:00'
    :param datetime: the ``datetime`` object; if `None`, the current date and
                     time in UTC is used
    :param width: either "long" or "short"
    :param locale: the `Locale` object, or a locale string
    :return: the GMT offset representation of the timezone
    :rtype: `unicode`
    :since: version 0.9
    """
    # NOTE: the parameter deliberately shadows the ``datetime`` class; the
    # module-level alias ``datetime_`` refers to the real class here.
    if datetime is None:
        datetime = datetime_.utcnow()
    elif isinstance(datetime, (int, long)):
        # NOTE(review): the trailing .time() discards the date component of
        # the timestamp; utcoffset() still works once tzinfo is attached,
        # but confirm the truncation is intended.
        datetime = datetime_.utcfromtimestamp(datetime).time()
    if datetime.tzinfo is None:
        # Naive values are assumed to be in UTC.
        datetime = datetime.replace(tzinfo=UTC)
    locale = Locale.parse(locale)
    # Split the UTC offset into whole hours and leftover minutes.
    offset = datetime.utcoffset()
    seconds = offset.days * 24 * 60 * 60 + offset.seconds
    hours, seconds = divmod(seconds, 3600)
    if width == 'short':
        pattern = u'%+03d%02d'
    else:
        # e.g. u'GMT%s' or u'UTC%s' depending on the locale.
        pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
    return pattern % (hours, seconds // 60)
def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME):
    """Return a representation of the given timezone using "location format".
    The result depends on both the local display name of the country and the
    city associated with the time zone:
    >>> from pytz import timezone
    >>> tz = timezone('America/St_Johns')
    >>> get_timezone_location(tz, locale='de_DE')
    u"Kanada (St. John's)"
    >>> tz = timezone('America/Mexico_City')
    >>> get_timezone_location(tz, locale='de_DE')
    u'Mexiko (Mexiko-Stadt)'
    If the timezone is associated with a country that uses only a single
    timezone, just the localized country name is returned:
    >>> tz = timezone('Europe/Berlin')
    >>> get_timezone_name(tz, locale='de_DE')
    u'Deutschland'
    :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
                         the timezone; if `None`, the current date and time in
                         UTC is assumed
    :param locale: the `Locale` object, or a locale string
    :return: the localized timezone name using location format
    :rtype: `unicode`
    :since: version 0.9
    """
    # Normalize the argument into a (datetime-or-None, tzinfo) pair.
    if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)):
        dt = None
        tzinfo = UTC
    elif isinstance(dt_or_tzinfo, (datetime, time)):
        dt = dt_or_tzinfo
        if dt.tzinfo is not None:
            tzinfo = dt.tzinfo
        else:
            tzinfo = UTC
    else:
        dt = None
        tzinfo = dt_or_tzinfo
    locale = Locale.parse(locale)
    if hasattr(tzinfo, 'zone'):  # pytz exposes the Olson zone id directly
        zone = tzinfo.zone
    else:
        zone = tzinfo.tzname(dt or datetime.utcnow())
    # Get the canonical time-zone code
    zone = get_global('zone_aliases').get(zone, zone)
    info = locale.time_zones.get(zone, {})
    # If there is only one timezone for the country, return the localized
    # country name alone.
    region_format = locale.zone_formats['region']
    territory = get_global('zone_territories').get(zone)
    if territory not in locale.territories:
        territory = 'ZZ' # invalid/unknown
    territory_name = locale.territories[territory]
    if territory and len(get_global('territory_zones').get(territory, [])) == 1:
        return region_format % (territory_name)
    # Otherwise, include the city in the output
    fallback_format = locale.zone_formats['fallback']
    if 'city' in info:
        city_name = info['city']
    else:
        metazone = get_global('meta_zones').get(zone)
        metazone_info = locale.meta_zones.get(metazone, {})
        if 'city' in metazone_info:
            # BUG FIX: this previously read the undefined name ``metainfo``,
            # raising NameError whenever the metazone carried a city name.
            city_name = metazone_info['city']
        elif '/' in zone:
            # Derive a human-readable city from the Olson id, e.g.
            # 'America/Mexico_City' -> 'Mexico City'.
            city_name = zone.split('/', 1)[1].replace('_', ' ')
        else:
            city_name = zone.replace('_', ' ')
    return region_format % (fallback_format % {
        '0': city_name,
        '1': territory_name
    })
def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False,
                      locale=LC_TIME):
    r"""Return the localized display name for the given timezone. The timezone
    may be specified using a ``datetime`` or `tzinfo` object.
    >>> from pytz import timezone
    >>> dt = time(15, 30, tzinfo=timezone('America/Los_Angeles'))
    >>> get_timezone_name(dt, locale='en_US')
    u'Pacific Standard Time'
    >>> get_timezone_name(dt, width='short', locale='en_US')
    u'PST'
    If this function gets passed only a `tzinfo` object and no concrete
    `datetime`, the returned display name is indenpendent of daylight savings
    time. This can be used for example for selecting timezones, or to set the
    time of events that recur across DST changes:
    >>> tz = timezone('America/Los_Angeles')
    >>> get_timezone_name(tz, locale='en_US')
    u'Pacific Time'
    >>> get_timezone_name(tz, 'short', locale='en_US')
    u'PT'
    If no localized display name for the timezone is available, and the timezone
    is associated with a country that uses only a single timezone, the name of
    that country is returned, formatted according to the locale:
    >>> tz = timezone('Europe/Berlin')
    >>> get_timezone_name(tz, locale='de_DE')
    u'Deutschland'
    >>> get_timezone_name(tz, locale='pt_BR')
    u'Hor\xe1rio Alemanha'
    On the other hand, if the country uses multiple timezones, the city is also
    included in the representation:
    >>> tz = timezone('America/St_Johns')
    >>> get_timezone_name(tz, locale='de_DE')
    u"Kanada (St. John's)"
    The `uncommon` parameter can be set to `True` to enable the use of timezone
    representations that are not commonly used by the requested locale. For
    example, while in frensh the central europian timezone is usually
    abbreviated as "HEC", in Canadian French, this abbreviation is not in
    common use, so a generic name would be chosen by default:
    >>> tz = timezone('Europe/Paris')
    >>> get_timezone_name(tz, 'short', locale='fr_CA')
    u'France'
    >>> get_timezone_name(tz, 'short', uncommon=True, locale='fr_CA')
    u'HEC'
    :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
                         the timezone; if a ``tzinfo`` object is used, the
                         resulting display name will be generic, i.e.
                         independent of daylight savings time; if `None`, the
                         current date in UTC is assumed
    :param width: either "long" or "short"
    :param uncommon: whether even uncommon timezone abbreviations should be used
    :param locale: the `Locale` object, or a locale string
    :return: the timezone display name
    :rtype: `unicode`
    :since: version 0.9
    :see: `LDML Appendix J: Time Zone Display Names
          <http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_
    """
    # Normalize the argument into a (datetime-or-None, tzinfo) pair.
    if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)):
        dt = None
        tzinfo = UTC
    elif isinstance(dt_or_tzinfo, (datetime, time)):
        dt = dt_or_tzinfo
        if dt.tzinfo is not None:
            tzinfo = dt.tzinfo
        else:
            tzinfo = UTC
    else:
        dt = None
        tzinfo = dt_or_tzinfo
    locale = Locale.parse(locale)
    if hasattr(tzinfo, 'zone'):  # pytz exposes the Olson zone id directly
        zone = tzinfo.zone
    else:
        zone = tzinfo.tzname(dt)
    # Get the canonical time-zone code
    zone = get_global('zone_aliases').get(zone, zone)
    info = locale.time_zones.get(zone, {})
    # Try explicitly translated zone names first
    if width in info:
        if dt is None:
            field = 'generic'
        else:
            dst = tzinfo.dst(dt)
            if dst is None:
                field = 'generic'
            elif dst == 0:
                field = 'standard'
            else:
                field = 'daylight'
        if field in info[width]:
            return info[width][field]
    # Fall back to the metazone (a group of zones sharing display names).
    metazone = get_global('meta_zones').get(zone)
    if metazone:
        metazone_info = locale.meta_zones.get(metazone, {})
        if width in metazone_info and (uncommon or metazone_info.get('common')):
            if dt is None:
                field = 'generic'
            else:
                field = tzinfo.dst(dt) and 'daylight' or 'standard'
            if field in metazone_info[width]:
                return metazone_info[width][field]
    # If we have a concrete datetime, we assume that the result can't be
    # independent of daylight savings time, so we return the GMT offset
    if dt is not None:
        return get_timezone_gmt(dt, width=width, locale=locale)
    return get_timezone_location(dt_or_tzinfo, locale=locale)
def format_date(date=None, format='medium', locale=LC_TIME):
    """Return a date formatted according to the given pattern.
    >>> d = date(2007, 04, 01)
    >>> format_date(d, locale='en_US')
    u'Apr 1, 2007'
    >>> format_date(d, format='full', locale='de_DE')
    u'Sonntag, 1. April 2007'
    If you don't want to use the locale default formats, you can specify a
    custom date pattern:
    >>> format_date(d, "EEE, MMM d, ''yy", locale='en')
    u"Sun, Apr 1, '07"
    :param date: the ``date`` or ``datetime`` object; if `None`, the current
                 date is used
    :param format: one of "full", "long", "medium", or "short", or a custom
                   date/time pattern
    :param locale: a `Locale` object or a locale identifier
    :rtype: `unicode`
    :note: If the pattern contains time fields, an `AttributeError` will be
           raised when trying to apply the formatting. This is also true if
           the value of ``date`` parameter is actually a ``datetime`` object,
           as this function automatically converts that to a ``date``.
    """
    if date is None:
        date = date_.today()
    elif isinstance(date, datetime):
        # Only the date component is formatted; drop the time part.
        date = date.date()
    locale = Locale.parse(locale)
    if format in ('full', 'long', 'medium', 'short'):
        format = get_date_format(format, locale=locale)
    # BUG FIX: the pattern was previously parsed twice and the first result
    # discarded (an unused local); parse once and reuse it.
    pattern = parse_pattern(format)
    return pattern.apply(date, locale)
def format_datetime(datetime=None, format='medium', tzinfo=None,
                    locale=LC_TIME):
    r"""Return a date formatted according to the given pattern.
    >>> dt = datetime(2007, 04, 01, 15, 30)
    >>> format_datetime(dt, locale='en_US')
    u'Apr 1, 2007 3:30:00 PM'
    For any pattern requiring the display of the time-zone, the third-party
    ``pytz`` package is needed to explicitly specify the time-zone:
    >>> from pytz import timezone
    >>> format_datetime(dt, 'full', tzinfo=timezone('Europe/Paris'),
    ...                 locale='fr_FR')
    u'dimanche 1 avril 2007 17:30:00 Heure avanc\xe9e de l\u2019Europe centrale'
    >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
    ...                 tzinfo=timezone('US/Eastern'), locale='en')
    u'2007.04.01 AD at 11:30:00 EDT'
    :param datetime: the `datetime` object; if `None`, the current date and
                     time is used
    :param format: one of "full", "long", "medium", or "short", or a custom
                   date/time pattern
    :param tzinfo: the timezone to apply to the time for display
    :param locale: a `Locale` object or a locale identifier
    :rtype: `unicode`
    """
    # Coerce the input into a tz-aware datetime (naive values assumed UTC).
    # Note: the parameter shadows the ``datetime`` class; ``datetime_`` is
    # the module-level alias for the real class.
    if datetime is None:
        datetime = datetime_.utcnow()
    elif isinstance(datetime, (int, long)):
        datetime = datetime_.utcfromtimestamp(datetime)
    elif isinstance(datetime, time):
        datetime = datetime_.combine(date.today(), datetime)
    if datetime.tzinfo is None:
        datetime = datetime.replace(tzinfo=UTC)
    if tzinfo is not None:
        datetime = datetime.astimezone(tzinfo)
        if hasattr(tzinfo, 'normalize'): # pytz
            datetime = tzinfo.normalize(datetime)
    locale = Locale.parse(locale)
    if format in ('full', 'long', 'medium', 'short'):
        # The locale's combination pattern places the time at {0} and the
        # date at {1}.
        return get_datetime_format(format, locale=locale) \
            .replace('{0}', format_time(datetime, format, tzinfo=None,
                                        locale=locale)) \
            .replace('{1}', format_date(datetime, format, locale=locale))
    else:
        return parse_pattern(format).apply(datetime, locale)
def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME):
    r"""Return a time formatted according to the given pattern.
    >>> t = time(15, 30)
    >>> format_time(t, locale='en_US')
    u'3:30:00 PM'
    >>> format_time(t, format='short', locale='de_DE')
    u'15:30'
    If you don't want to use the locale default formats, you can specify a
    custom time pattern:
    >>> format_time(t, "hh 'o''clock' a", locale='en')
    u"03 o'clock PM"
    For any pattern requiring the display of the time-zone, the third-party
    ``pytz`` package is needed to explicitly specify the time-zone:
    >>> from pytz import timezone
    >>> t = datetime(2007, 4, 1, 15, 30)
    >>> tzinfo = timezone('Europe/Paris')
    >>> t = tzinfo.localize(t)
    >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
    u'15:30:00 Heure avanc\xe9e de l\u2019Europe centrale'
    >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=timezone('US/Eastern'),
    ...             locale='en')
    u"09 o'clock AM, Eastern Daylight Time"
    As that example shows, when this function gets passed a
    ``datetime.datetime`` value, the actual time in the formatted string is
    adjusted to the timezone specified by the `tzinfo` parameter. If the
    ``datetime`` is "naive" (i.e. it has no associated timezone information),
    it is assumed to be in UTC.
    These timezone calculations are **not** performed if the value is of type
    ``datetime.time``, as without date information there's no way to determine
    what a given time would translate to in a different timezone without
    information about whether daylight savings time is in effect or not. This
    means that time values are left as-is, and the value of the `tzinfo`
    parameter is only used to display the timezone name if needed:
    >>> t = time(15, 30)
    >>> format_time(t, format='full', tzinfo=timezone('Europe/Paris'),
    ...             locale='fr_FR')
    u'15:30:00 Heure normale de l\u2019Europe centrale'
    >>> format_time(t, format='full', tzinfo=timezone('US/Eastern'),
    ...             locale='en_US')
    u'3:30:00 PM Eastern Standard Time'
    :param time: the ``time`` or ``datetime`` object; if `None`, the current
                 time in UTC is used
    :param format: one of "full", "long", "medium", or "short", or a custom
                   date/time pattern
    :param tzinfo: the time-zone to apply to the time for display
    :param locale: a `Locale` object or a locale identifier
    :rtype: `unicode`
    :note: If the pattern contains date fields, an `AttributeError` will be
           raised when trying to apply the formatting. This is also true if
           the value of ``time`` parameter is actually a ``datetime`` object,
           as this function automatically converts that to a ``time``.
    """
    # Coerce the input into a tz-aware time (naive values assumed UTC).
    # The parameter shadows the ``time`` class; ``datetime`` here is the
    # class imported at module level.
    if time is None:
        time = datetime.utcnow()
    elif isinstance(time, (int, long)):
        time = datetime.utcfromtimestamp(time)
    if time.tzinfo is None:
        time = time.replace(tzinfo=UTC)
    if isinstance(time, datetime):
        # Full datetimes are converted into the display timezone, then
        # reduced to their time-of-day component.
        if tzinfo is not None:
            time = time.astimezone(tzinfo)
            if hasattr(tzinfo, 'normalize'): # pytz
                time = tzinfo.normalize(time)
        time = time.timetz()
    elif tzinfo is not None:
        # Bare times are NOT shifted; tzinfo is attached for display only.
        time = time.replace(tzinfo=tzinfo)
    locale = Locale.parse(locale)
    if format in ('full', 'long', 'medium', 'short'):
        format = get_time_format(format, locale=locale)
    return parse_pattern(format).apply(time, locale)
# (unit name, seconds per unit) pairs ordered from largest to smallest;
# format_timedelta() walks this list top-down and stops at the first unit
# that crosses its threshold.
TIMEDELTA_UNITS = (
    ('year', 3600 * 24 * 365),
    ('month', 3600 * 24 * 30),
    ('week', 3600 * 24 * 7),
    ('day', 3600 * 24),
    ('hour', 3600),
    ('minute', 60),
    ('second', 1)
)
def format_timedelta(delta, granularity='second', threshold=.85, locale=LC_TIME):
    """Return a time delta according to the rules of the given locale.
    >>> format_timedelta(timedelta(weeks=12), locale='en_US')
    u'3 mths'
    >>> format_timedelta(timedelta(seconds=1), locale='es')
    u'1 s'
    The granularity parameter can be provided to alter the lowest unit
    presented, which defaults to a second.
    >>> format_timedelta(timedelta(hours=3), granularity='day',
    ...                  locale='en_US')
    u'1 day'
    The threshold parameter can be used to determine at which value the
    presentation switches to the next higher unit. A higher threshold factor
    means the presentation will switch later. For example:
    >>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US')
    u'1 day'
    >>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US')
    u'23 hrs'
    :param delta: a ``timedelta`` object representing the time difference to
                  format, or the delta in seconds as an `int` value
    :param granularity: determines the smallest unit that should be displayed,
                        the value can be one of "year", "month", "week", "day",
                        "hour", "minute" or "second"
    :param threshold: factor that determines at which point the presentation
                      switches to the next higher unit
    :param locale: a `Locale` object or a locale identifier
    :rtype: `unicode`
    """
    if isinstance(delta, timedelta):
        seconds = int((delta.days * 86400) + delta.seconds)
    else:
        seconds = delta
    locale = Locale.parse(locale)
    # Walk units from largest to smallest; pick the first whose value
    # crosses the threshold (or the requested granularity floor).
    for unit, secs_per_unit in TIMEDELTA_UNITS:
        # True division (``from __future__ import division`` at file top),
        # so fractional values are compared against the threshold.
        value = abs(seconds) / secs_per_unit
        if value >= threshold or unit == granularity:
            if unit == granularity and value > 0:
                # Never display "0 <granularity>" for a non-zero delta.
                value = max(1, value)
            value = int(round(value))
            plural_form = locale.plural_form(value)
            pattern = locale._data['unit_patterns'][unit][plural_form]
            return pattern.replace('{0}', str(value))
    return u''
def parse_date(string, locale=LC_TIME):
    """Parse a date from a string.
    This function uses the date format for the locale as a hint to determine
    the order in which the date fields appear in the string.
    >>> parse_date('4/1/04', locale='en_US')
    datetime.date(2004, 4, 1)
    >>> parse_date('01.04.2004', locale='de_DE')
    datetime.date(2004, 4, 1)
    :param string: the string containing the date
    :param locale: a `Locale` object or a locale identifier
    :return: the parsed date
    :rtype: `date`
    """
    # TODO: try ISO format first?
    format = get_date_format(locale=locale).pattern.lower()
    year_idx = format.index('y')
    # BUG FIX: str.index() raises ValueError when the character is missing,
    # so the intended fallback below (checking for a negative index) could
    # never run; str.find() returns -1 instead.
    month_idx = format.find('m')
    if month_idx < 0:
        month_idx = format.index('l')
    day_idx = format.index('d')
    # The relative order of the Y/M/D symbols in the localized pattern tells
    # us which captured number is which.
    indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')]
    indexes.sort()
    indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
    # FIXME: this currently only supports numbers, but should also support month
    #        names, both in the requested locale, and english
    numbers = re.findall('(\d+)', string)
    year = numbers[indexes['Y']]
    if len(year) == 2:
        # Two-digit years are assumed to be in the 2000s.
        year = 2000 + int(year)
    else:
        year = int(year)
    month = int(numbers[indexes['M']])
    day = int(numbers[indexes['D']])
    if month > 12:
        # The input order didn't match the pattern order; swap the fields.
        month, day = day, month
    return date(year, month, day)
def parse_datetime(string, locale=LC_TIME):
    """Parse a combined date and time from a string.
    The locale's date and time formats would serve as parsing hints; this
    is not implemented yet.
    :param string: the string containing the date and time
    :param locale: a `Locale` object or a locale identifier
    :return: the parsed date/time
    :rtype: `datetime`
    """
    raise NotImplementedError
def parse_time(string, locale=LC_TIME):
    """Parse a time from a string.
    This function uses the time format for the locale as a hint to determine
    the order in which the time fields appear in the string.
    >>> parse_time('15:30:00', locale='en_US')
    datetime.time(15, 30)
    :param string: the string containing the time
    :param locale: a `Locale` object or a locale identifier
    :return: the parsed time
    :rtype: `time`
    """
    # TODO: try ISO format first?
    format = get_time_format(locale=locale).pattern.lower()
    # BUG FIX: str.index() raises ValueError when 'h' is absent, so the
    # fallback to the 0-23 hour symbol 'k' below was unreachable; str.find()
    # returns -1 instead.
    hour_idx = format.find('h')
    if hour_idx < 0:
        hour_idx = format.index('k')
    min_idx = format.index('m')
    sec_idx = format.index('s')
    # The relative order of the H/M/S symbols in the localized pattern tells
    # us which captured number is which.
    indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')]
    indexes.sort()
    indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
    # FIXME: support 12 hour clock, and 0-based hour specification
    #        and seconds should be optional, maybe minutes too
    #        oh, and time-zones, of course
    numbers = re.findall('(\d+)', string)
    hour = int(numbers[indexes['H']])
    minute = int(numbers[indexes['M']])
    second = int(numbers[indexes['S']])
    return time(hour, minute, second)
class DateTimePattern(object):
    """A CLDR date/time pattern paired with the %-style format string it
    compiles to.  Applying the pattern to a value interpolates the format
    through a `DateTimeFormat` wrapper around that value.
    """

    def __init__(self, pattern, format):
        self.pattern = pattern
        self.format = format

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.pattern)

    def __unicode__(self):
        return self.pattern

    def __mod__(self, other):
        # Only a DateTimeFormat mapping may drive the interpolation.
        if type(other) is DateTimeFormat:
            return self.format % other
        return NotImplemented

    def apply(self, datetime, locale):
        """Format the given value for the given locale."""
        return self % DateTimeFormat(datetime, locale)
class DateTimeFormat(object):
def __init__(self, value, locale):
assert isinstance(value, (date, datetime, time))
if isinstance(value, (datetime, time)) and value.tzinfo is None:
value = value.replace(tzinfo=UTC)
self.value = value
self.locale = Locale.parse(locale)
def __getitem__(self, name):
char = name[0]
num = len(name)
if char == 'G':
return self.format_era(char, num)
elif char in ('y', 'Y', 'u'):
return self.format_year(char, num)
elif char in ('Q', 'q'):
return self.format_quarter(char, num)
elif char in ('M', 'L'):
return self.format_month(char, num)
elif char in ('w', 'W'):
return self.format_week(char, num)
elif char == 'd':
return self.format(self.value.day, num)
elif char == 'D':
return self.format_day_of_year(num)
elif char == 'F':
return self.format_day_of_week_in_month()
elif char in ('E', 'e', 'c'):
return self.format_weekday(char, num)
elif char == 'a':
return self.format_period(char)
elif char == 'h':
if self.value.hour % 12 == 0:
return self.format(12, num)
else:
return self.format(self.value.hour % 12, num)
elif char == 'H':
return self.format(self.value.hour, num)
elif char == 'K':
return self.format(self.value.hour % 12, num)
elif char == 'k':
if self.value.hour == 0:
return self.format(24, num)
else:
return self.format(self.value.hour, num)
elif char == 'm':
return self.format(self.value.minute, num)
elif char == 's':
return self.format(self.value.second, num)
elif char == 'S':
return self.format_frac_seconds(num)
elif char == 'A':
return self.format_milliseconds_in_day(num)
elif char in ('z', 'Z', 'v', 'V'):
return self.format_timezone(char, num)
else:
raise KeyError('Unsupported date/time field %r' % char)
def format_era(self, char, num):
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
era = int(self.value.year >= 0)
return get_era_names(width, self.locale)[era]
def format_year(self, char, num):
value = self.value.year
if char.isupper():
week = self.get_week_number(self.get_day_of_year())
if week == 0:
value -= 1
year = self.format(value, num)
if num == 2:
year = year[-2:]
return year
def format_quarter(self, char, num):
quarter = (self.value.month - 1) // 3 + 1
if num <= 2:
return ('%%0%dd' % num) % quarter
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'Q': 'format', 'q': 'stand-alone'}[char]
return get_quarter_names(width, context, self.locale)[quarter]
def format_month(self, char, num):
if num <= 2:
return ('%%0%dd' % num) % self.value.month
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'M': 'format', 'L': 'stand-alone'}[char]
return get_month_names(width, context, self.locale)[self.value.month]
def format_week(self, char, num):
if char.islower(): # week of year
day_of_year = self.get_day_of_year()
week = self.get_week_number(day_of_year)
if week == 0:
date = self.value - timedelta(days=day_of_year)
week = self.get_week_number(self.get_day_of_year(date),
date.weekday())
return self.format(week, num)
else: # week of month
week = self.get_week_number(self.value.day)
if week == 0:
date = self.value - timedelta(days=self.value.day)
week = self.get_week_number(date.day, date.weekday())
pass
return '%d' % week
def format_weekday(self, char, num):
if num < 3:
if char.islower():
value = 7 - self.locale.first_week_day + self.value.weekday()
return self.format(value % 7 + 1, num)
num = 3
weekday = self.value.weekday()
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num]
return get_day_names(width, context, self.locale)[weekday]
def format_day_of_year(self, num):
return self.format(self.get_day_of_year(), num)
def format_day_of_week_in_month(self):
return '%d' % ((self.value.day - 1) // 7 + 1)
def format_period(self, char):
period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)]
return get_period_names(locale=self.locale)[period]
def format_frac_seconds(self, num):
value = str(self.value.microsecond)
return self.format(round(float('.%s' % value), num) * 10**num, num)
def format_milliseconds_in_day(self, num):
msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
self.value.minute * 60000 + self.value.hour * 3600000
return self.format(msecs, num)
    def format_timezone(self, char, num):
        """Format a time-zone field ('z', 'Z', 'v' or 'V').

        One to three letters select the short form, four the long form.
        'z' names the zone of the full datetime, 'Z' renders a GMT offset,
        'v' names the tzinfo's generic zone, and 'V' with length 1 allows
        uncommon zone names, otherwise a location-based description.
        """
        width = {3: 'short', 4: 'long'}[max(3, num)]
        if char == 'z':
            return get_timezone_name(self.value, width, locale=self.locale)
        elif char == 'Z':
            return get_timezone_gmt(self.value, width, locale=self.locale)
        elif char == 'v':
            return get_timezone_name(self.value.tzinfo, width,
                                     locale=self.locale)
        elif char == 'V':
            if num == 1:
                return get_timezone_name(self.value.tzinfo, width,
                                         uncommon=True, locale=self.locale)
            return get_timezone_location(self.value.tzinfo, locale=self.locale)
def format(self, value, length):
return ('%%0%dd' % length) % value
    def get_day_of_year(self, date=None):
        """Return the 1-based ordinal day of the year for *date*
        (defaults to the wrapped value); January 1st is day 1."""
        if date is None:
            date = self.value
        # date_ is the module's alias for datetime.date (defined elsewhere).
        return (date - date_(date.year, 1, 1)).days + 1
def get_week_number(self, day_of_period, day_of_week=None):
"""Return the number of the week of a day within a period. This may be
the week number in a year or the week number in a month.
Usually this will return a value equal to or greater than 1, but if the
first week of the period is so short that it actually counts as the last
week of the previous period, this function will return 0.
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
>>> format.get_week_number(6)
1
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
>>> format.get_week_number(6)
2
:param day_of_period: the number of the day in the period (usually
either the day of month or the day of year)
:param day_of_week: the week day; if ommitted, the week day of the
current date is assumed
"""
if day_of_week is None:
day_of_week = self.value.weekday()
first_day = (day_of_week - self.locale.first_week_day -
day_of_period + 1) % 7
if first_day < 0:
first_day += 7
week_number = (day_of_period + first_day - 1) // 7
if 7 - first_day >= self.locale.min_week_days:
week_number += 1
return week_number
# Map of date/time pattern field characters to the list of field lengths
# that are valid for each (None = any length is accepted).  Used by
# parse_pattern() to validate fields.
PATTERN_CHARS = {
    'G': [1, 2, 3, 4, 5],                                           # era
    'y': None, 'Y': None, 'u': None,                                # year
    'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4],                           # quarter
    'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5],                     # month
    'w': [1, 2], 'W': [1],                                          # week
    'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None,               # day
    'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5],  # week day
    'a': [1],                                                       # period
    'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2],             # hour
    'm': [1, 2],                                                    # minute
    's': [1, 2], 'S': None, 'A': None,                              # second
    'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4]  # zone
}
def parse_pattern(pattern):
    """Parse date, time, and datetime format patterns.

    >>> parse_pattern("MMMMd").format
    u'%(MMMM)s%(d)s'
    >>> parse_pattern("MMM d, yyyy").format
    u'%(MMM)s %(d)s, %(yyyy)s'

    Pattern can contain literal strings in single quotes:

    >>> parse_pattern("H:mm' Uhr 'z").format
    u'%(H)s:%(mm)s Uhr %(z)s'

    An actual single quote can be used by using two adjacent single quote
    characters:

    >>> parse_pattern("hh' o''clock'").format
    u"%(hh)s o'clock"

    :param pattern: the formatting pattern to parse
    """
    if type(pattern) is DateTimePattern:
        return pattern

    result = []
    quotebuf = None
    charbuf = []
    # One-element lists so the nested helpers can rebind the values
    # (this code predates ``nonlocal``).
    fieldchar = ['']
    fieldnum = [0]

    def append_chars():
        # Flush buffered literal characters, escaping '%' for the
        # %-interpolation template being built.
        result.append(''.join(charbuf).replace('%', '%%'))
        del charbuf[:]

    def append_field():
        # Flush the pending field, validating its length against
        # PATTERN_CHARS first.
        limit = PATTERN_CHARS[fieldchar[0]]
        if limit and fieldnum[0] not in limit:
            raise ValueError('Invalid length for field: %r'
                             % (fieldchar[0] * fieldnum[0]))
        result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0]))
        fieldchar[0] = ''
        fieldnum[0] = 0

    # "''" is an escaped single quote: hide it as NUL so the quote handling
    # below cannot trip over it; it is restored at the very end.  (The
    # unused enumerate() index variable has been removed.)
    for char in pattern.replace("''", '\0'):
        if quotebuf is None:
            if char == "'": # quote started
                if fieldchar[0]:
                    append_field()
                elif charbuf:
                    append_chars()
                quotebuf = []
            elif char in PATTERN_CHARS:
                if charbuf:
                    append_chars()
                if char == fieldchar[0]:
                    fieldnum[0] += 1
                else:
                    if fieldchar[0]:
                        append_field()
                    fieldchar[0] = char
                    fieldnum[0] = 1
            else:
                if fieldchar[0]:
                    append_field()
                charbuf.append(char)

        elif quotebuf is not None:
            if char == "'": # end of quote
                charbuf.extend(quotebuf)
                quotebuf = None
            else: # inside quote
                quotebuf.append(char)

    if fieldchar[0]:
        append_field()
    elif charbuf:
        append_chars()

    return DateTimePattern(pattern, u''.join(result).replace('\0', "'"))
| bsd-3-clause |
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Tools/scripts/methfix.py | 96 | 5463 | #! /usr/bin/env python
# Fix Python source files to avoid using
# def method(self, (arg1, ..., argn)):
# instead of the more rational
# def method(self, arg1, ..., argn):
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
# It complains about binaries (files containing null bytes)
# and about files that are ostensibly not Python files: if the first
# line starts with '#!' and does not contain the string 'python'.
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...
import sys
import re
import os
from stat import *
# Shorthand writers: errors and debug output go to stderr, the diff-like
# change report goes to stdout.
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
def main():
    """Process each command-line argument (file or directory tree) and
    exit with status 1 if any file could not be fixed, 2 on usage error."""
    bad = 0
    if not sys.argv[1:]: # No arguments
        err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
        sys.exit(2)
    for arg in sys.argv[1:]:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            # Symbolic links given explicitly are refused (see file header).
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)
# Matches simple Python module file names like "foo_bar2.py".
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')

def ispython(name):
    """Return True if *name* looks like a Python module file name.

    Bug fix: the old ``match(name) >= 0`` was a leftover from the ancient
    ``regex`` module; ``re`` match() returns a Match object or None, so
    comparing with 0 is meaningless (and a TypeError on Python 3).
    """
    return ispythonprog.match(name) is not None
def recursedown(dirname):
    """Recursively fix all Python files under *dirname*.

    Returns 1 if anything went wrong, 0 otherwise.  Symbolic links are
    skipped; subdirectories are processed after the files of a directory.
    (Python 2 ``except E, msg`` syntax — this script targets Python 2.)
    """
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        err('%s: cannot list directory: %r\n' % (dirname, msg))
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        # Links are silently ignored; directories are queued for later.
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad
def fix(filename):
    """Fix one file in place (via a temp file + backup).

    Returns 1 on error, 0 otherwise.  No temp file is even created unless
    a line actually needs changing; binaries and non-Python scripts are
    refused.  (Python 2 syntax: ``except E, msg`` and octal ``07777``.)
    """
##  dbg('fix(%r)\n' % (filename,))
    try:
        f = open(filename, 'r')
    except IOError, msg:
        err('%s: cannot open: %r\n' % (filename, msg))
        return 1
    head, tail = os.path.split(filename)
    tempname = os.path.join(head, '@' + tail)
    g = None
    # If we find a match, we rewind the file and start over but
    # now copy everything to a temp file.
    lineno = 0
    while 1:
        line = f.readline()
        if not line: break
        lineno = lineno + 1
        if g is None and '\0' in line:
            # Check for binary files
            err(filename + ': contains null bytes; not fixed\n')
            f.close()
            return 1
        if lineno == 1 and g is None and line[:2] == '#!':
            # Check for non-Python scripts
            words = line[2:].split()
            # NOTE(review): ``re.search(...) < 0`` is a relic of the old
            # regex module; with ``re`` this compares Match/None with 0.
            if words and re.search('[pP]ython', words[0]) < 0:
                msg = filename + ': ' + words[0]
                msg = msg + ' script; not fixed\n'
                err(msg)
                f.close()
                return 1
        # Join backslash-continued lines so fixline sees whole statements.
        while line[-2:] == '\\\n':
            nextline = f.readline()
            if not nextline: break
            line = line + nextline
            lineno = lineno + 1
        newline = fixline(line)
        if newline != line:
            if g is None:
                # First change found: open the temp file and restart the
                # scan from the top, copying every line this time.
                try:
                    g = open(tempname, 'w')
                except IOError, msg:
                    f.close()
                    err('%s: cannot create: %r\n' % (tempname, msg))
                    return 1
                f.seek(0)
                lineno = 0
                rep(filename + ':\n')
                continue # restart from the beginning
            rep(repr(lineno) + '\n')
            rep('< ' + line)
            rep('> ' + newline)
        if g is not None:
            g.write(newline)

    # End of file
    f.close()
    if not g: return 0 # No changes

    # Finishing touch -- move files
    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err('%s: warning: backup failed (%r)\n' % (filename, msg))
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err('%s: rename failed (%r)\n' % (filename, msg))
        return 1
    # Return success
    return 0
# NOTE(review): this pattern and fixline() were written for the pre-`re`
# ``regex`` module: the parentheses are meant literally, ``match() >= 0``
# relies on integer results, and ``.regs`` is read off the *pattern*
# object.  Compiled ``re`` patterns have no ``regs`` attribute and
# ``match()`` returns a Match object or None, so this code cannot work as
# written with the ``re`` module — it needs porting (escaped parens, a
# saved Match object, and ``m.span(i)`` for the group extents).
fixpat = '^[ \t]+def +[a-zA-Z0-9_]+ *( *self *, *(( *(.*) *)) *) *:'
fixprog = re.compile(fixpat)

def fixline(line):
    # Rewrite "def m(self, (a, b)):" as "def m(self, a, b):" by splicing
    # the inner-parenthesis text over the outer-parenthesis span.
    if fixprog.match(line) >= 0:
        (a, b), (c, d) = fixprog.regs[1:3]
        line = line[:a] + line[c:d] + line[b:]
    return line

if __name__ == '__main__':
    main()
| apache-2.0 |
vprabu/iotsdk | c/lib/3rdparty/libxml2-2.7.8/python/tests/outbuf.py | 87 | 3102 | #!/usr/bin/python -u
import sys
import libxml2
import StringIO
def testSimpleBufferWrites():
    """Write via both buffer APIs and check the bytes reach the StringIO."""
    f = StringIO.StringIO()
    buf = libxml2.createOutputBuffer(f, "ISO-8859-1")
    buf.write(3, "foo")
    buf.writeString("bar")
    buf.close()

    if f.getvalue() != "foobar":
        print "Failed to save to StringIO"
        sys.exit(1)
def testSaveDocToBuffer():
    """
    Regression test for bug #154294: saving a parsed document through an
    output buffer must produce the full serialized XML.
    """
    input = '<foo>Hello</foo>'
    expected = '''\
<?xml version="1.0" encoding="UTF-8"?>
<foo>Hello</foo>
'''
    f = StringIO.StringIO()
    buf = libxml2.createOutputBuffer(f, 'UTF-8')
    doc = libxml2.parseDoc(input)
    doc.saveFileTo(buf, 'UTF-8')
    doc.freeDoc()
    if f.getvalue() != expected:
        print 'xmlDoc.saveFileTo() call failed.'
        print ' got: %s' % repr(f.getvalue())
        print 'expected: %s' % repr(expected)
        sys.exit(1)
def testSaveFormattedDocToBuffer():
    """Check saveFormatFileTo() with formatting disabled (0) and enabled (1)."""
    input = '<outer><inner>Some text</inner><inner/></outer>'
    # The formatted and non-formatted versions of the output.
    expected = ('''\
<?xml version="1.0" encoding="UTF-8"?>
<outer><inner>Some text</inner><inner/></outer>
''', '''\
<?xml version="1.0" encoding="UTF-8"?>
<outer>
  <inner>Some text</inner>
  <inner/>
</outer>
''')
    doc = libxml2.parseDoc(input)
    for i in (0, 1):
        f = StringIO.StringIO()
        buf = libxml2.createOutputBuffer(f, 'UTF-8')
        doc.saveFormatFileTo(buf, 'UTF-8', i)
        if f.getvalue() != expected[i]:
            print 'xmlDoc.saveFormatFileTo() call failed.'
            print ' got: %s' % repr(f.getvalue())
            print 'expected: %s' % repr(expected[i])
            sys.exit(1)
    doc.freeDoc()
def testSaveIntoOutputBuffer():
    """
    Similar to the previous two tests, except this time we invoke the save
    methods on the output buffer object and pass in an XML node object.
    """
    input = '<foo>Hello</foo>'
    expected = '''\
<?xml version="1.0" encoding="UTF-8"?>
<foo>Hello</foo>
'''
    f = StringIO.StringIO()
    doc = libxml2.parseDoc(input)
    buf = libxml2.createOutputBuffer(f, 'UTF-8')
    buf.saveFileTo(doc, 'UTF-8')
    if f.getvalue() != expected:
        print 'outputBuffer.saveFileTo() call failed.'
        print ' got: %s' % repr(f.getvalue())
        print 'expected: %s' % repr(expected)
        sys.exit(1)
    # Repeat with the formatted variant; same expected output here.
    f = StringIO.StringIO()
    buf = libxml2.createOutputBuffer(f, 'UTF-8')
    buf.saveFormatFileTo(doc, 'UTF-8', 1)
    if f.getvalue() != expected:
        print 'outputBuffer.saveFormatFileTo() call failed.'
        print ' got: %s' % repr(f.getvalue())
        print 'expected: %s' % repr(expected)
        sys.exit(1)
    doc.freeDoc()
if __name__ == '__main__':
    # Memory debug specific: enable libxml2 allocation tracking so a leak
    # can be reported after the tests run.
    libxml2.debugMemory(1)

    testSimpleBufferWrites()
    testSaveDocToBuffer()
    testSaveFormattedDocToBuffer()
    testSaveIntoOutputBuffer()

    libxml2.cleanupParser()
    if libxml2.debugMemory(1) == 0:
        print "OK"
    else:
        print "Memory leak %d bytes" % (libxml2.debugMemory(1))
        libxml2.dumpMemory()
ODM2/YODA-Tools | yodatools/dataloader/view/clsDBConfig.py | 2 | 6433 | # -*- coding: utf-8 -*-
###########################################################################
# Python code generated with wxFormBuilder (version Jun 5 2014)
# http://www.wxformbuilder.org/
#
# TODO: (Is this something we should follow?) PLEASE DO 'NOT' EDIT THIS FILE!
###########################################################################
import wx
###########################################################################
# Class clsDBConfiguration
###########################################################################
class clsDBConfiguration(wx.Panel):
    """wxFormBuilder-generated panel for entering a database connection
    (version, connection type, server, database, user, password) with a
    'Test Connection' button.  Override OnBtnTest in a derived class."""

    def __init__(self, parent):
        # Fixed-height panel; width may flex between Min and Max sizes.
        wx.Panel.__init__(self, parent, id=wx.ID_ANY,
                          pos=wx.DefaultPosition,
                          size=wx.Size(500, 291),
                          style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
        self.SetMinSize(wx.Size(442, 291))
        self.SetMaxSize(wx.Size(627, 291))

        formSizer = wx.BoxSizer(wx.VERTICAL)
        sbSizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, 'Database Connection'), wx.VERTICAL)  # noqa

        # Two-column label/input grid for the connection fields.
        connectionSizer = wx.FlexGridSizer(0, 2, 0, 15)
        connectionSizer.AddGrowableCol(1)
        connectionSizer.SetFlexibleDirection(wx.VERTICAL)
        connectionSizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_ALL)

        # --- DB version selector ---
        self.stVersion = wx.StaticText(self, wx.ID_ANY, 'DB Version:', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT)  # noqa
        self.stVersion.Wrap(-1)
        connectionSizer.Add(self.stVersion, 0, wx.ALL | wx.ALIGN_RIGHT | wx.EXPAND, 5)  # noqa

        cbDatabaseType1Choices = ['2.0']#, '1.1.1']
        self.cbDatabaseType1 = wx.ComboBox(self, wx.ID_ANY, '2.0', wx.DefaultPosition, wx.DefaultSize, cbDatabaseType1Choices, wx.CB_READONLY )  # noqa
        self.cbDatabaseType1.SetSelection(1)
        connectionSizer.Add(self.cbDatabaseType1, 1, wx.ALL | wx.EXPAND, 5)

        # --- Connection type (choices filled in elsewhere) ---
        self.stConnType = wx.StaticText(self, wx.ID_ANY, 'Connection Type:', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT)  # noqa
        self.stConnType.Wrap(-1)
        connectionSizer.Add(self.stConnType, 0, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5)  # noqa

        cbDatabaseTypeChoices = []
        self.cbDatabaseType = wx.ComboBox(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, cbDatabaseTypeChoices, wx.CB_READONLY)  # noqa
        connectionSizer.Add(self.cbDatabaseType, 1, wx.ALL | wx.EXPAND, 5)

        # --- Server / database / user / password text fields ---
        self.stServer = wx.StaticText(self, wx.ID_ANY, 'Server:', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT)  # noqa
        self.stServer.Wrap(-1)
        connectionSizer.Add(self.stServer, 0, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5)  # noqa

        self.txtServer = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 | wx.FULL_REPAINT_ON_RESIZE | wx.SIMPLE_BORDER)  # noqa
        connectionSizer.Add(self.txtServer, 1, wx.ALL | wx.EXPAND, 5)

        self.stDBName = wx.StaticText(self, wx.ID_ANY, 'Database:', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT)  # noqa
        self.stDBName.Wrap(-1)
        self.stDBName.SetFont(wx.Font(wx.NORMAL_FONT.GetPointSize(), 70, 90, 90, False, wx.EmptyString))  # noqa

        connectionSizer.Add(self.stDBName, 0, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5)  # noqa

        self.txtDBName = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 | wx.SIMPLE_BORDER)  # noqa
        connectionSizer.Add(self.txtDBName, 1, wx.ALL | wx.EXPAND, 5)

        self.stUser = wx.StaticText(self, wx.ID_ANY, 'User:', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT)  # noqa
        self.stUser.Wrap(-1)
        self.stUser.SetFont(wx.Font(wx.NORMAL_FONT.GetPointSize(), 70, 90, 90, False, wx.EmptyString))  # noqa

        connectionSizer.Add(self.stUser, 0, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5)  # noqa

        self.txtUser = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 | wx.SIMPLE_BORDER)  # noqa
        connectionSizer.Add(self.txtUser, 1, wx.ALL | wx.EXPAND, 5)

        self.stPass = wx.StaticText(self, wx.ID_ANY, 'Password:', wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT)  # noqa
        self.stPass.Wrap(-1)
        self.stPass.SetFont(wx.Font(wx.NORMAL_FONT.GetPointSize(), 70, 90, 90, False, wx.EmptyString))  # noqa

        connectionSizer.Add(self.stPass, 0, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5)  # noqa

        self.txtPass = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PASSWORD | wx.SIMPLE_BORDER)  # noqa
        connectionSizer.Add(self.txtPass, 1, wx.ALL | wx.EXPAND, 5)

        sbSizer.Add(connectionSizer, 90, wx.EXPAND, 3)
        formSizer.Add(sbSizer, 1, wx.ALL | wx.EXPAND, 7)

        # --- Button row (Save/Cancel kept commented out by the generator) ---
        btnSizer = wx.FlexGridSizer(0, 3, 0, 25)
        btnSizer.AddGrowableCol(0)
        btnSizer.AddGrowableCol(1)
        btnSizer.AddGrowableCol(2)
        btnSizer.SetFlexibleDirection(wx.VERTICAL)
        btnSizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_ALL)

        self.btnTest = wx.Button(self, wx.ID_ANY, 'Test Connection', wx.DefaultPosition, wx.DefaultSize, 0)  # noqa
        btnSizer.Add(self.btnTest, 0, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL, 5)  # noqa

        # self.btnSave = wx.Button(self, wx.ID_ANY, 'Save Connection', wx.DefaultPosition, wx.DefaultSize, 0)  # noqa
        # btnSizer.Add(self.btnSave, 0, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL, 5)  # noqa

        # self.btnCancel = wx.Button(self, wx.ID_ANY, 'Cancel', wx.DefaultPosition, wx.DefaultSize, 0)  # noqa
        # btnSizer.Add(self.btnCancel, 0, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL, 5)  # noqa

        formSizer.Add(btnSizer, 10, wx.EXPAND, 2)

        self.SetSizer(formSizer)
        self.Layout()

        # Connect Events.
        self.btnTest.Bind(wx.EVT_BUTTON, self.OnBtnTest)
        # self.btnSave.Bind(wx.EVT_BUTTON, self.OnBtnSave)
        # self.btnCancel.Bind(wx.EVT_BUTTON, self.OnBtnCancel)

        self.btnSizer = btnSizer
        self.formSizer = formSizer
        self.btnTest.SetFocus()

    def __del__(self):
        pass

    # Virtual event handlers, override them in your derived class.
    def OnBtnTest(self, event):
        event.Skip()

    # def OnBtnSave(self, event):
    #     event.Skip()
    #
    # def OnBtnCancel(self, event):
    #     event.Skip()
| bsd-3-clause |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/test/test_codecmaps_jp.py | 60 | 1880 | #
# test_codecmaps_jp.py
# Codec mapping tests for Japanese encodings
#
from test import support
from test import multibytecodec_support
import unittest
class TestCP932Map(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    # Round-trips the cp932 codec against Microsoft's published table.
    encoding = 'cp932'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \
                 'WINDOWS/CP932.TXT'
    # Supplementary mappings missing from the vendor file.
    supmaps = [
        (b'\x80', '\u0080'),
        (b'\xa0', '\uf8f0'),
        (b'\xfd', '\uf8f1'),
        (b'\xfe', '\uf8f2'),
        (b'\xff', '\uf8f3'),
    ]
    # Bytes 0xA1-0xDF map to half-width katakana U+FF61-U+FF9F
    # (i + 0xFEC0).
    for i in range(0xa1, 0xe0):
        supmaps.append((bytes([i]), chr(i+0xfec0)))
class TestEUCJPCOMPATMap(multibytecodec_support.TestBase_Mapping,
                         unittest.TestCase):
    # Round-trips the euc_jp codec against the EUC-JP reference table.
    encoding = 'euc_jp'
    mapfilename = 'EUC-JP.TXT'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JP.TXT'
class TestSJISCOMPATMap(multibytecodec_support.TestBase_Mapping,
                        unittest.TestCase):
    # Round-trips shift_jis; pass_* list the mapping-file entries that are
    # deliberately exempt from the strict encode/decode round-trip checks
    # (backslash/yen and tilde/overline ambiguities).
    encoding = 'shift_jis'
    mapfilename = 'SHIFTJIS.TXT'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \
                 '/EASTASIA/JIS/SHIFTJIS.TXT'

    pass_enctest = [
        (b'\x81_', '\\'),
    ]
    pass_dectest = [
        (b'\\', '\xa5'),
        (b'~', '\u203e'),
        (b'\x81_', '\\'),
    ]
class TestEUCJISX0213Map(multibytecodec_support.TestBase_Mapping,
                         unittest.TestCase):
    # Round-trips the euc_jisx0213 codec against its reference table.
    encoding = 'euc_jisx0213'
    mapfilename = 'EUC-JISX0213.TXT'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JISX0213.TXT'
class TestSJISX0213Map(multibytecodec_support.TestBase_Mapping,
                       unittest.TestCase):
    # Round-trips the shift_jisx0213 codec against its reference table.
    encoding = 'shift_jisx0213'
    mapfilename = 'SHIFT_JISX0213.TXT'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/SHIFT_JISX0213.TXT'
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
| lgpl-3.0 |
mi1980/projecthadoop3 | udacity/cs101-intro-cs/code/lesson4/problem-set/better-split.py | 4 | 1225 | # 1 Gold Star
# The built-in <string>.split() procedure works
# okay, but fails to find all the words on a page
# because it only uses whitespace to split the
# string. To do better, we should also use punctuation
# marks to split the page into words.
# Define a procedure, split_string, that takes two
# inputs: the string to split and a string containing
# all of the characters considered separators. The
# procedure should return a list of strings that break
# the source string up by the characters in the
# splitlist.
def split_string(source, splitlist):
    """Split *source* on every character in *splitlist*.

    Empty fragments (from adjacent separators or leading/trailing ones)
    are dropped.  Robustness fix: an empty *splitlist* no longer raises
    IndexError — the source is returned as a single-element list (or an
    empty list for an empty source), matching "no separators" semantics.

    :param source: the string to split
    :param splitlist: a string whose characters are all treated as separators
    :return: list of non-empty fragments
    """
    if not splitlist:
        return [source] if source else []
    # Normalize every separator to the first one, then split once on it.
    separator = splitlist[0]
    for character in splitlist[1:]:
        source = source.replace(character, separator)
    return [piece for piece in source.split(separator) if piece]
# Smoke tests with the course's expected outputs (Python 2 print syntax).
out = split_string("This is a test-of the,string separation-code!"," ,!-")
print out
#>>> ['This', 'is', 'a', 'test', 'of', 'the', 'string', 'separation', 'code']

out = split_string("After the flood ... all the colors came out.", " .")
print out
#>>> ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']

out = split_string("First Name,Last Name,Street Address,City,State,Zip Code",",")
print out
#>>>['First Name', 'Last Name', 'Street Address', 'City', 'State', 'Zip Code']
danielfaulknor/webpymail | webpymail/mailapp/views/folder.py | 7 | 2936 | # -*- coding: utf-8 -*-
# WebPyMail - IMAP python/django web mail client
# Copyright (C) 2008 Helder Guerreiro
## This file is part of WebPyMail.
##
## WebPyMail is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## WebPyMail is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with WebPyMail. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <helder@paxjulia.com>
#
# $Id$
#
"""Display folders and associated actions
"""
# Imports:
# Python
import base64
# Django
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
# Local
from mailapp.models import FoldersToExpand
from mail_utils import serverLogin
from themesapp.shortcuts import render_to_response
from utils.config import WebpymailConfig
##
# Views
##
@login_required
def show_folders_view(request):
    '''Show the account folders.

    Logs in to the IMAP server for the current user, configures the
    special and expandable folders, refreshes the subscribed folder list
    and renders it with the user's default mail address.
    '''
    # Login to the server:
    M = serverLogin( request )

    # Special folders
    # TODO: will get this from the user config
    M.set_special_folders('INBOX', 'INBOX.Drafts', 'INBOX.Templates')

    # Expandable folders (persisted per-user in FoldersToExpand)
    expand_folders = request.user.folderstoexpand_set.all()
    if expand_folders:
        M.set_expand_list(*[ f.folder_name for f in expand_folders ])

    # Read the subscribed folder list:
    M.refresh_folders(subscribed=True)

    # Get the default identity (first one configured)
    config = WebpymailConfig( request )
    identity_list = config.identities()
    default_address = identity_list[0]['mail_address']

    return render_to_response('mail/folder_list.html',
        {'server': M,
         'address': default_address },
        context_instance=RequestContext(request))
@login_required
def set_folder_expand(request, folder):
    """Mark a folder as expanded for the logged-in user.

    :param folder: urlsafe-base64-encoded folder name from the URL.
    """
    folder_name = base64.urlsafe_b64decode(str(folder))
    # get_or_create replaces the previous count()-then-save() sequence,
    # closing the check-then-insert race that could create duplicate rows
    # under concurrent requests.
    FoldersToExpand.objects.get_or_create(user=request.user,
                                          folder_name=folder_name)
    return HttpResponseRedirect(reverse('folder_list'))
@login_required
def set_folder_collapse(request, folder):
    """Remove a folder from the logged-in user's expanded-folder list.

    :param folder: urlsafe-base64-encoded folder name from the URL.
    """
    folder_name = base64.urlsafe_b64decode(str(folder))
    user = request.user
    # delete() on an empty queryset is a no-op, so this is safe even if
    # the folder was never expanded.
    FoldersToExpand.objects.filter(user__exact=user,
            folder_name__exact = folder_name).delete()
    return HttpResponseRedirect(reverse('folder_list'))
| gpl-3.0 |
hfp/tensorflow-xsmm | tensorflow/python/eager/benchmarks_test.py | 4 | 31608 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import gradient_descent
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def c_tfe_py_fastpath_execute(a,
                              b,
                              transpose_a=False,
                              transpose_b=False,
                              name=None):
  """Run MatMul(a, b) through the eager C fast path (TFE_Py_FastPathExecute).

  Eager-only: asserts eager execution, then forwards the op name, attrs and
  inputs directly to the C layer.  On failure the C status exception is
  converted to the corresponding Python exception (with *name* appended to
  the message when given).
  """
  ctx = context.context()
  assert ctx.executing_eagerly(
  ), "The prototype doesn't contain C code for graph construction"
  try:
    return pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._handle, ctx.device_name, "MatMul", name,
        ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
        "transpose_b", transpose_b)
  except core._NotOkStatusException as e:
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    six.raise_from(core._status_to_exception(e.code, message), None)
class SubclassedKerasModel(keras.Model):
  """Five-layer Dense MLP (64-128-256-256-10) built by subclassing
  keras.Model; counterpart to the functional/sequential builders below."""

  def __init__(self, initializer="ones"):
    super(SubclassedKerasModel, self).__init__()
    self.layer_a = keras.layers.Dense(
        64, kernel_initializer=initializer, bias_initializer="zeros")
    self.layer_b = keras.layers.Dense(
        128, kernel_initializer=initializer, bias_initializer="zeros")
    self.layer_c = keras.layers.Dense(
        256, kernel_initializer=initializer, bias_initializer="zeros")
    self.layer_d = keras.layers.Dense(
        256, kernel_initializer=initializer, bias_initializer="zeros")
    self.layer_e = keras.layers.Dense(
        10, kernel_initializer=initializer, bias_initializer="zeros")

  def call(self, x):
    # Plain feed-forward pass through the five layers.
    x = self.layer_a(x)
    x = self.layer_b(x)
    x = self.layer_c(x)
    x = self.layer_d(x)
    return self.layer_e(x)
def make_keras_model(initializer="ones"):
  """Build the functional-API counterpart of SubclassedKerasModel: a
  Dense 64-128-256-256-10 stack on a 10-feature input."""
  model_input = keras.Input(shape=(10,))
  x = model_input
  for units in (64, 128, 256, 256, 10):
    x = keras.layers.Dense(
        units, kernel_initializer=initializer, bias_initializer="zeros")(x)
  return keras.Model(inputs=model_input, outputs=x)
def make_sequential_keras_model(initializer="ones"):
  """Build the Sequential-API counterpart of SubclassedKerasModel: a
  Dense 64-128-256-256-10 stack on a 10-feature input."""
  model = keras.models.Sequential()
  # First layer declares the input shape; the rest infer it.
  model.add(keras.layers.Dense(
      64, kernel_initializer=initializer, bias_initializer="zeros",
      input_shape=(10,)))
  for units in (128, 256, 256, 10):
    model.add(keras.layers.Dense(
        units, kernel_initializer=initializer, bias_initializer="zeros"))
  return model
class MicroBenchmarks(test.Benchmark):
  def __init__(self):
    """Pre-build the random operand tensors shared by the benchmarks.

    NOTE(review): test.Benchmark.__init__ is not called here — presumably
    intentional for this benchmark harness; confirm before changing.
    """
    # used for multiply benchmarks
    self._m_2 = random_ops.random_uniform([2])

    # used for matmul benchmarks
    self._m_2_by_2 = random_ops.random_uniform((2, 2))
    self._m_100_by_784 = random_ops.random_uniform((100, 784))
    self._num_iters_2_by_2 = 30000
    self._num_iters_100_by_784 = 1000
  def _run(self, func, num_iters, execution_mode=None):
    """Time *num_iters* calls of *func* and report mean wall time in µs.

    In ASYNC execution mode the pending ops are drained (async_wait) once
    before timing starts and once after the loop, so the measured interval
    covers all queued work.
    """
    # call func to maybe warm up the GPU
    ctx = context.context()
    with ctx.execution_mode(execution_mode):
      func()
      if execution_mode == context.ASYNC:
        ctx.async_wait()
      start = time.time()
      for _ in xrange(num_iters):
        func()
      if execution_mode == context.ASYNC:
        ctx.async_wait()
      end = time.time()

      mean_us = (end - start) * 1e6 / num_iters
      self.report_benchmark(
          iters=num_iters,
          wall_time=mean_us,
          extras={"examples_per_sec": num_iters / (end - start)})
  # --- Tensor/array creation and indexing micro-benchmarks. ---

  def benchmark_create_np_array(self):
    func = lambda: np.array([3.0])
    self._run(func, 30000)

  def _benchmark_create_tensor(self, value, dtype, device):
    """Benchmark overheads of creating a Tensor object."""
    ctx = context.context()
    handle = ctx._handle
    if device == GPU:
      # Warmup the GPU
      ops.EagerTensor(value, context=handle, device=device)

    def func():
      ops.EagerTensor(value, context=handle, device=device, dtype=dtype)

    self._run(func, 30000)

  def benchmark_create_constant(self):
    func = lambda: constant_op.constant(3.0)
    self._run(func, 30000)

  def benchmark_create_float_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)

  def benchmark_create_float_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        CPU)

  def benchmark_create_int32_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)

  def benchmark_create_int32_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)

  # GPU variants are skipped entirely when no GPU is available.
  def benchmark_create_float_tensor_from_list_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)

  def benchmark_create_float_tensor_from_np_array_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        GPU)

  def benchmark_create_int32_tensor_from_list_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)

  def benchmark_create_int32_tensor_from_np_array_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)

  # Indexing with different index types exercises different fast paths.
  def benchmark_index_tensor_with_literal(self):
    func = lambda: constant_op.constant([3.0])[0]
    self._run(func, 30000)

  def benchmark_index_tensor_with_tensor(self):
    func = lambda idx=constant_op.constant(0): constant_op.constant([3.0])[idx]
    self._run(func, 30000)

  def benchmark_index_tensor_with_np_array(self):
    func = lambda idx=np.array(0): constant_op.constant([3.0])[idx]
    self._run(func, 30000)
  # --- Elementwise multiply micro-benchmarks (numpy vs TF operator vs
  # --- math_ops.multiply), on CPU and GPU. ---

  def _benchmark_np_multiply(self, m, num_iters):
    # Baseline: the same multiply done in numpy on the host copy.
    a = m.cpu().numpy()
    func = lambda: a * a
    self._run(func, num_iters)

  def _benchmark_tf_multiply(self, m, num_iters):
    # Python operator path (__mul__ dispatch).
    func = lambda: m * m
    self._run(func, num_iters)

  def _benchmark_tf_multiply_op(self, m, num_iters):
    # Direct math_ops.multiply call path.
    func = lambda: math_ops.multiply(m, m)
    self._run(func, num_iters)

  def benchmark_np_multiply(self):
    self._benchmark_np_multiply(self._m_2, 30000)

  def benchmark_tf_multiply_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply(m, 30000)

  def benchmark_tf_multiply_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply(m, 30000)

  def benchmark_tf_multiply_op_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply_op(m, 30000)

  def benchmark_tf_multiply_op_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply_op(m, 30000)
  # --- Identity-op and gradient-machinery micro-benchmarks. ---

  def benchmark_tf_identity(self):
    m = self._m_2
    self._run(lambda: gen_array_ops.identity(m), 30000)

  def benchmark_slowpath_tf_identity(self):
    # Passing a Python int forces the non-fast-path conversion.
    self._run(lambda: gen_array_ops.identity(1), 30000)

  def benchmark_tfe_py_execute_identity(self):
    # Calls the C execute entry point directly, bypassing the Python op
    # wrappers.
    m = self._m_2
    ctx_handle = context.context()._handle
    attrs = ("T", self._m_2.dtype.as_datatype_enum)
    inputs = [m]

    def f():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
                                       attrs, 1)

    self._run(f, 30000)

  def benchmark_tf_gradient_function_identity(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(
          lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
          30000)

  def benchmark_tf_gradient_forward_identity(self):
    # Forward pass only, under an active tape watching the input.
    with backprop.GradientTape() as tape:
      m = self._m_2
      tape.watch(m)
      self._run(lambda: gen_array_ops.identity(m), 30000)

  def benchmark_tf_gradient_tape_push_pop(self):
    # Cost of entering/exiting an (empty) GradientTape context.

    def f():
      with backprop.GradientTape():
        pass

    self._run(f, 30000)

  def benchmark_tf_gradient_function_no_op(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
  # --- Shared helpers used by the matmul / defun / read-variable benchmarks
  # below. Each one times a different dispatch layer for the same computation.
  def _benchmark_np_matmul(self, m, transpose_b, num_iters):
    # NumPy baseline on a host copy.
    a = m.cpu().numpy()
    b = a.T if transpose_b else a
    func = lambda: np.dot(a, b)
    self._run(func, num_iters)
  def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
                           execution_mode=None):
    # Public math_ops.matmul path.
    func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
    self._run(func, num_iters, execution_mode=execution_mode)
  def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
    # Generated-op wrapper path (skips math_ops Python-level dispatch).
    def func():
      gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
    self._run(func, num_iters)
  def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
                                                num_iters):
    # Fast-path C execute helper (defined elsewhere in this file).
    def func():
      c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
    self._run(func, num_iters)
  def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
    # Raw TFE_Py_Execute call with pre-built attrs: the minimal dispatch cost.
    inputs = [m, m]
    # pylint: disable=protected-access
    ctx_handle = context.context()._handle
    # pylint: enable=protected-access
    device = context.context().device_name
    attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
             m.dtype.as_datatype_enum)
    def func():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
                                       attrs, 1)
    self._run(func, num_iters)
  def _benchmark_defun_matmul(self,
                              m,
                              transpose_b,
                              num_iters,
                              execution_mode=None):
    # matmul wrapped in a defun; the trace is built once, calls are timed.
    f = function.defun(math_ops.matmul)
    func = lambda: f(m, m, transpose_b=transpose_b)
    self._run(func, num_iters, execution_mode=execution_mode)
  def _benchmark_defun_matmul_forward_backward(self,
                                               m,
                                               transpose_b,
                                               num_iters,
                                               execution_mode=None):
    # Forward + backward pass of the defun'd matmul under a tape.
    f = function.defun(math_ops.matmul)
    def func():
      with backprop.GradientTape() as gt:
        gt.watch(m)
        y = f(m, m, transpose_b=transpose_b)
        _ = gt.gradient(y, m)
    self._run(func, num_iters, execution_mode=execution_mode)
  def _benchmark_read_variable(self, m, num_iters):
    # m is expected to be a ResourceVariable; times .value() reads.
    self._run(m.value, num_iters)
  def _benchmark_matmul_read_variable(self, m, num_iters):
    self._benchmark_gen_math_ops_matmul(
        m, transpose_b=False, num_iters=num_iters)
  def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
    with backprop.GradientTape() as tape:
      tape.watch(m)
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=num_iters)
  def _benchmark_read_variable_with_tape(self, m, num_iters):
    with backprop.GradientTape() as tape:
      tape.watch(m)
      self._run(m.value, num_iters)
  # Benchmarks for A^2, A of dimension 2 by 2.
  # Each method pins the device, materializes the operand there, and delegates
  # to the matching _benchmark_* helper; *_async variants use context.ASYNC.
  def benchmark_np_matmul_2_by_2(self):
    self._benchmark_np_matmul(
        self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tf_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tf_matmul_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul_forward_backward(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul_forward_backward(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_tf_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tf_matmul_2_by_2_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  # Benchmarks for AA.T, A of dimension 100 by 784.
  # Same structure as the 2x2 suite above, but with transpose_b=True and a
  # larger operand so kernel time dominates dispatch overhead.
  def benchmark_np_matmul_100_by_784(self):
    self._benchmark_np_matmul(
        self._m_100_by_784,
        transpose_b=True,
        num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_CPU_async(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=True,
          num_iters=self._num_iters_100_by_784,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_defun_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=True,
          num_iters=self._num_iters_100_by_784,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_defun_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  # --- Defun call-overhead benchmarks: the wrapped function is a no-op, so
  # these time cache lookup / argument canonicalization for positional vs
  # keyword calls, with and without an input_signature.
  def benchmark_defun_without_signature(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(func)
    t = constant_op.constant(0.0)
    cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(cache_computation, 30000)
  def benchmark_defun_without_signature_and_with_kwargs(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(func)
    t = constant_op.constant(0.0)
    def cache_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
    self._run(cache_computation, 30000)
  def benchmark_defun_with_signature(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(signature_computation, 30000)
  def benchmark_defun_with_signature_and_kwargs(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    def signature_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
    self._run(signature_computation, 30000)
  # --- ResourceVariable read benchmarks, with and without a recording tape.
  def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
  def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_matmul_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
      self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
      self._benchmark_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)
  # --- Keras model-call benchmarks. SubclassedKerasModel / make_keras_model /
  # make_sequential_keras_model are defined elsewhere in this file and are
  # expected to be numerically equivalent model builds.
  def benchmark_keras_model_subclassed(self):
    model = SubclassedKerasModel()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # First call is more expensive (creates variables etc.), discount that.
    func()
    # The whole point of this test is to contrast subclassing with
    # the functional style of keras model building, so validate that
    # the models are equivalent.
    assert np.equal(func(), make_keras_model()(data)).all()
    self._run(func, 30000)
  def benchmark_keras_model_functional(self):
    model = make_keras_model()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # Symmetry with benchmark_keras_model_subclassed
    func()
    assert np.equal(func(), SubclassedKerasModel()(data)).all()
    self._run(func, 30000)
  def benchmark_keras_model_sequential(self):
    model = make_sequential_keras_model()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # Symmetry with benchmark_keras_model_functional
    func()
    assert np.equal(func(), make_keras_model()(data)).all()
    self._run(func, 30000)
  # --- Helpers for fit/evaluate/predict benchmarks: each compiles the model,
  # performs one warm-up call (variable creation), then times a 1000-step run.
  def _benchmark_keras_model_fit(self, model, run_eagerly=False):
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse", run_eagerly=run_eagerly)
    func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
    self._run(func, 1)
  def _benchmark_keras_model_evaluate(self, model, run_eagerly=False):
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse", run_eagerly=run_eagerly)
    func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.evaluate(dataset, steps=1, verbose=0)
    self._run(func, 1)
  def _benchmark_keras_model_predict(self, model, run_eagerly=False):
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors(tuple([data])).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse", run_eagerly=run_eagerly)
    func = lambda: model.predict(dataset, steps=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.predict(dataset, steps=1, verbose=0)
    self._run(func, 1)
  # --- Named fit/evaluate/predict benchmarks: {subclassed, functional,
  # sequential} x {default, graph_mode, run_eagerly} combinations, all
  # delegating to the _benchmark_keras_model_* helpers above.
  def benchmark_keras_model_subclassed_fit(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_subclassed_fit_graph_mode(self):
    with context.graph_mode():
      model = SubclassedKerasModel(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_subclassed_fit_run_model_eagerly(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)
  def benchmark_keras_model_functional_fit(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_functional_fit_graph_mode(self):
    with context.graph_mode():
      model = make_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_functional_fit_run_model_eagerly(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)
  def benchmark_keras_model_sequential_fit(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_sequential_fit_graph_mode(self):
    with context.graph_mode():
      model = make_sequential_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_sequential_fit_run_model_eagerly(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)
  def benchmark_keras_model_subclassed_evaluate(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_subclassed_evaluate_run_model_eagerly(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model, run_eagerly=True)
  def benchmark_keras_model_functional_evaluate(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_functional_evaluate_run_model_eagerly(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model, run_eagerly=True)
  def benchmark_keras_model_sequential_evaluate(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_sequential_evaluate_run_model_eagerly(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model, run_eagerly=True)
  def benchmark_keras_model_subclassed_predict(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_subclassed_predict_run_model_eagerly(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model, run_eagerly=True)
  def benchmark_keras_model_functional_predict(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_functional_predict_run_model_eagerly(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model, run_eagerly=True)
  def benchmark_keras_model_sequential_predict(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_sequential_predict_run_model_eagerly(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model, run_eagerly=True)
  # --- functional_ops.scan benchmarks: plain eager call vs defun-compiled.
  def benchmarkScan(self):
    elems = math_ops.range(1600)
    def scan():
      return functional_ops.scan(
          lambda a, x: a + x, elems, parallel_iterations=1)
    self._run(scan, 100)
  def benchmarkScanDefun(self):
    elems = math_ops.range(1600)
    # Same computation, but traced once into a defun.
    @function.defun
    def scan():
      return functional_ops.scan(
          lambda a, x: a + x, elems, parallel_iterations=1)
    self._run(scan, 100)
# Standard TensorFlow test entry point; runs the benchmarks/tests above.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
RealImpactAnalytics/airflow | airflow/contrib/operators/segment_track_event_operator.py | 1 | 2675 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.segment_hook import SegmentHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SegmentTrackEventOperator(BaseOperator):
    """
    Record a Segment "track" event for the given user.

    :param user_id: The ID for this user in your database
    :type user_id: string
    :param event: The name of the event being tracked
    :type event: string
    :param properties: A dictionary of properties attached to the event.
    :type properties: dict
    :param segment_conn_id: The connection ID used when connecting to Segment.
    :type segment_conn_id: string
    :param segment_debug_mode: Whether Segment should run in debug mode.
        Defaults to False
    :type segment_debug_mode: boolean
    """
    template_fields = ('user_id', 'event', 'properties')
    ui_color = '#ffd700'
    @apply_defaults
    def __init__(self,
                 user_id,
                 event,
                 properties=None,
                 segment_conn_id='segment_default',
                 segment_debug_mode=False,
                 *args,
                 **kwargs):
        super(SegmentTrackEventOperator, self).__init__(*args, **kwargs)
        self.user_id = user_id
        self.event = event
        # An omitted properties argument means "no event properties".
        self.properties = properties or {}
        self.segment_conn_id = segment_conn_id
        self.segment_debug_mode = segment_debug_mode
    def execute(self, context):
        # Build the hook lazily at execution time so the connection is only
        # resolved when the task actually runs.
        hook = SegmentHook(segment_conn_id=self.segment_conn_id,
                           segment_debug_mode=self.segment_debug_mode)
        self.log.info(
            'Sending track event ({0}) for user id: {1} with properties: {2}'.
            format(self.event, self.user_id, self.properties))
        hook.track(self.user_id, self.event, self.properties)
| apache-2.0 |
crafty78/ansible | test/runner/lib/http.py | 14 | 3182 | """
Primitive replacement for requests to avoid extra dependency.
Avoids use of urllib2 due to lack of SNI support.
"""
from __future__ import absolute_import, print_function
import json
try:
from urllib import urlencode
except ImportError:
# noinspection PyCompatibility,PyUnresolvedReferences,PyUnresolvedReferences
from urllib.parse import urlencode # pylint: disable=locally-disabled, import-error, no-name-in-module
from lib.util import (
CommonConfig,
ApplicationError,
run_command,
)
class HttpClient(object):
    """Make HTTP requests via the curl command line tool.

    curl is used instead of urllib2 because the latter lacks SNI support
    on older Python versions.
    """
    def __init__(self, args, always=False):
        """
        :type args: CommonConfig
        :type always: bool
        """
        self.args = args
        self.always = always
    def get(self, url):
        """Perform a GET request.
        :type url: str
        :rtype: HttpResponse
        """
        return self.request('GET', url)
    def delete(self, url):
        """Perform a DELETE request.
        :type url: str
        :rtype: HttpResponse
        """
        return self.request('DELETE', url)
    def put(self, url, data=None, headers=None):
        """Perform a PUT request.
        :type url: str
        :type data: str | None
        :type headers: dict[str, str] | None
        :rtype: HttpResponse
        """
        return self.request('PUT', url, data, headers)
    def request(self, method, url, data=None, headers=None):
        """Build and run a curl command, returning the parsed response.
        :type method: str
        :type url: str
        :type data: str | None
        :type headers: dict[str, str] | None
        :rtype: HttpResponse
        """
        cmd = ['curl', '-s', '-S', '-i', '-X', method]
        # Copy the headers so the caller's dict is never mutated below.
        headers = {} if headers is None else dict(headers)
        headers['Expect'] = ''  # don't send expect continue header
        for header in headers.keys():
            cmd += ['-H', '%s: %s' % (header, headers[header])]
        if data is not None:
            cmd += ['-d', data]
        cmd += [url]
        stdout, _ = run_command(self.args, cmd, capture=True, always=self.always)
        if self.args.explain and not self.always:
            # Dry-run mode: no command was executed, fabricate an empty 200.
            return HttpResponse(200, '')
        # curl -i prints the status line and headers, a blank line, then the body.
        header, body = stdout.split('\r\n\r\n', 1)
        response_headers = header.split('\r\n')
        first_line = response_headers[0]
        http_response = first_line.split(' ')
        status_code = int(http_response[1])
        return HttpResponse(status_code, body)
class HttpResponse(object):
    """Parsed HTTP response produced by HttpClient."""
    def __init__(self, status_code, response):
        """
        :type status_code: int
        :type response: str
        """
        self.status_code = status_code
        self.response = response
    def json(self):
        """Decode the response body as JSON.
        :rtype: any
        """
        try:
            decoded = json.loads(self.response)
        except ValueError:
            raise HttpError(self.status_code, 'Cannot parse response as JSON:\n%s' % self.response)
        return decoded
class HttpError(ApplicationError):
    """Error raised for an HTTP response that represents a failure."""
    def __init__(self, status, message):
        """
        :type status: int
        :type message: str
        """
        # Keep the numeric status available to callers that branch on it.
        self.status = status
        super(HttpError, self).__init__('%s: %s' % (status, message))
dreipol/cmsplugin-filer | cmsplugin_filer_video/models.py | 26 | 3036 | from cms.models import CMSPlugin
from cmsplugin_filer_video import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.fields.file import FilerFileField
from filer.fields.image import FilerImageField
from os.path import basename
import re
class FilerVideo(CMSPlugin):
    """CMS plugin model holding a video player configuration.

    Either ``movie`` (an uploaded file) or ``movie_url`` (an external
    vimeo/youtube link) is expected to be set — presumably not both;
    ``get_movie`` prefers the uploaded file. TODO confirm against the form
    validation.
    """
    # player settings
    movie = FilerFileField(verbose_name=_('movie file'), help_text=_('use .flv file or h264 encoded video file'), blank=True, null=True)
    movie_url = models.CharField(_('movie url'), max_length=255, help_text=_('vimeo or youtube video url. Example: http://www.youtube.com/watch?v=YFa59lK-kpo'), blank=True, null=True)
    image = FilerImageField(verbose_name=_('image'), help_text=_('preview image file'), null=True, blank=True, related_name='filer_video_image')
    width = models.PositiveSmallIntegerField(_('width'), default=settings.VIDEO_WIDTH)
    height = models.PositiveSmallIntegerField(_('height'), default=settings.VIDEO_HEIGHT)
    auto_play = models.BooleanField(_('auto play'), default=settings.VIDEO_AUTOPLAY)
    auto_hide = models.BooleanField(_('auto hide'), default=settings.VIDEO_AUTOHIDE)
    fullscreen = models.BooleanField(_('fullscreen'), default=settings.VIDEO_FULLSCREEN)
    loop = models.BooleanField(_('loop'), default=settings.VIDEO_LOOP)
    # plugin settings (all colors are 6-digit hex strings without '#')
    bgcolor = models.CharField(_('background color'), max_length=6, default=settings.VIDEO_BG_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    textcolor = models.CharField(_('text color'), max_length=6, default=settings.VIDEO_TEXT_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    seekbarcolor = models.CharField(_('seekbar color'), max_length=6, default=settings.VIDEO_SEEKBAR_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    seekbarbgcolor = models.CharField(_('seekbar bg color'), max_length=6, default=settings.VIDEO_SEEKBARBG_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    loadingbarcolor = models.CharField(_('loadingbar color'), max_length=6, default=settings.VIDEO_LOADINGBAR_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    buttonoutcolor = models.CharField(_('button out color'), max_length=6, default=settings.VIDEO_BUTTON_OUT_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    buttonovercolor = models.CharField(_('button over color'), max_length=6, default=settings.VIDEO_BUTTON_OVER_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    buttonhighlightcolor = models.CharField(_('button highlight color'), max_length=6, default=settings.VIDEO_BUTTON_HIGHLIGHT_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    def __unicode__(self):
        # Display the bare file name of whichever source is configured.
        if self.movie:
            name = self.movie.path
        else:
            name = self.movie_url
        return u"%s" % basename(name)
    def get_height(self):
        # Height as a string, for template/flashvars use.
        return "%s" % (self.height)
    def get_width(self):
        # Width as a string, for template/flashvars use.
        return "%s" % (self.width)
    def get_movie(self):
        # URL of the video source; the uploaded file wins over movie_url.
        if self.movie:
            return self.movie.url
        else:
            return self.movie_url
| bsd-3-clause |
RAPD/RAPD | src/plugins/subcontractors/xdsme/mos2xds.py | 10 | 4318 | #!/usr/bin/env python
"""
24/09/04 1st version pierre.legrand@synchrotron-soleil.fr
Note: This program assum that the most commons XDS frame is choosen
(see the distributed XDS input files):
Beam is along the Z axis, Y axis point verticaly down (like gravity),
and the X axis is defined to yield an orthonormal right handed
laboratory coordinate system {X,Y,Z}.
TODO:
- detector conversion
Uses the ScientificPython module by Konrad Hinsen
http://starship.python.net/crew/hinsen/scientific.html
License: http://www.opensource.org/licenses/bsd-license.php
"""
__author__ = "Pierre Legrand (pierre.legrand \at synchrotron-soleil.fr)"
__date__ = "16-11-2005"
__copyright__ = "Copyright (c) 2005 Pierre Legrand"
__license__ = "New BSD License"
__version__ = "0.2.2"
import math
import sys
import os.path
from XOconv import *
_progname = os.path.split(sys.argv[0])[1]
_usage = """
Converting Mosflm crystal orientation informations to XDS format.
The program convert the orientation matix from Mosflm (.mat file)
to XDS pseudo XPARM.XDS file:
USAGE: %s [OPTION]... FILE
FILE is the Mosflm crystal orientation file (.mat).
OPTIONS:
-h
--help
Print this help message.
-p
--pg-permutations
Print out the other equivalent crystal orientation
informations based on the point group allowed permutations.
-v
--verbose
Turn on verbose output.
""" % _progname
Qmos2xds = mat3(ez,-ey,ex).transpose()
XPARM_fmt = """%(first_frame)6d%(phi_init)12.6f%(delta_phi)12.6f%(spindle)s
%(wavelength)15.6f 0.000000 0.000000%(inv_wavelength)15.6f
%(nx)10d%(ny)10d%(qy)10.5f%(qy)10.5f
%(distance)15.6f%(beam_x)15.6f%(beam_y)15.6f
%(detector_orientation)s
%(spg)10d%(a)10.3f%(b)10.3f%(c)10.3f%(alpha)10.3f%(beta)10.3f%(gamma)10.3f
%(xds_crystal_orientation)s
"""
XPARM_fmt =""" 1 PHI_INIT DELT_PHI 1.000000 0.000000 0.000000
%(wavelength)15.6f 0.000000 0.000000%(inv_wavelength)15.6f
NX_PIX NY_PIX X_PIXSIZ Y_PIXSIZ
DISTANCE X_BEAM_CENT Y_BEAM_CENT
1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
1%(cellStr)s
%(xds_crystal_orientation)s"""
def getCellParameters(UB):
    """Return the cell parameters [a*, b*, c*, alpha, beta, gamma] derived
    from the reciprocal-space orientation matrix UB, angles in degrees."""
    # Columns of UB are the reciprocal cell axes.
    a_rec = Vector(UB[:,0])
    b_rec = Vector(UB[:,1])
    c_rec = Vector(UB[:,2])
    lengths = [a_rec.length(), b_rec.length(), c_rec.length()]
    angles = [b_rec.angle(c_rec)*r2d, c_rec.angle(a_rec)*r2d,
              a_rec.angle(b_rec)*r2d]
    return Numeric.array(lengths + angles)
if __name__ == '__main__':
    import getopt
    # Command-line flags (see _usage above).
    _debug = False
    _do_PG_permutations = False
    _verbose = False
    short_opt = "dhpv"
    long_opt = ["debug",
                "help",
                "pg-permutations",
                "verbose"]
    try:
        opts, inputf = getopt.getopt(sys.argv[1:], short_opt, long_opt)
    except getopt.GetoptError:
        # print help information and exit:
        print _usage
        sys.exit(2)
    for o, a in opts:
        if o in ("-d", "--debug"):
            _debug = True
        if o in ("-v", "--verbose"):
            _verbose = True
        if o in ("-h", "--help"):
            print _usage
            sys.exit()
        if o in ("-p","--pg-permutations"):
            _do_PG_permutations = True
    # Parse the Mosflm .mat orientation file given as the first argument.
    MOSi = MosflmParser(inputf[0])
    xdsPar = {}
    xdsPar["cellStr"] = 6*"%10.3f" % tuple(MOSi.cell)
    wavelength = MOSi.UB_to_wavelength()
    xdsPar["wavelength"] = wavelength
    xdsPar["inv_wavelength"] = 1/wavelength
    # Re-express the (wavelength-scaled) Mosflm UB matrix in the XDS frame.
    UBxds = Qmos2xds * MOSi.UB / wavelength
    Ar = vec3(UBxds.getColumn(0))
    Br = vec3(UBxds.getColumn(1))
    Cr = vec3(UBxds.getColumn(2))
    # Direct-space cell axes from the reciprocal axes (scaled by 1/V*).
    volumInv = Ar * Br.cross(Cr)
    A = Br.cross(Cr)/volumInv
    B = Cr.cross(Ar)/volumInv
    C = Ar.cross(Br)/volumInv
    #print A.length(), B.length(), C.length(), B.angle(C)*r2d, A.angle(C)*r2d, A.angle(B)*r2d
    fmtM = 3 * "%15.6f" + "\n"
    xdsPar["xds_crystal_orientation"] = fmtM % tuple(A) + \
                                        fmtM % tuple(B) + \
                                        fmtM % tuple(C)
    # Emit the pseudo XPARM.XDS content on stdout (trailing comma: no newline).
    print XPARM_fmt % xdsPar,
| agpl-3.0 |
Bulochkin/tensorflow_pack | tensorflow/contrib/learn/python/learn/estimators/run_config_test.py | 31 | 13011 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""run_config.py tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as core_run_config
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
TEST_DIR = "test_dir"
ANOTHER_TEST_DIR = "another_test_dir"
MASTER = "master_"
RANDOM_SEED = 123
patch = test.mock.patch
class RunConfigTest(test.TestCase):
  def test_instance_of_core_run_config(self):
    # The contrib RunConfig must remain a subclass of the core RunConfig.
    config = run_config_lib.RunConfig()
    self.assertTrue(isinstance(config, core_run_config.RunConfig))
  def test_defaults_with_no_tf_config(self):
    # Without TF_CONFIG in the environment, the config describes a local,
    # single-task chief with no cluster.
    config = run_config_lib.RunConfig()
    self.assertEqual(config.master, "")
    self.assertEqual(config.task_id, 0)
    self.assertEqual(config.num_ps_replicas, 0)
    self.assertEqual(config.cluster_spec, {})
    self.assertIsNone(config.task_type)
    self.assertTrue(config.is_chief)
    self.assertEqual(config.evaluation_master, "")
  def test_values_from_tf_config(self):
    # Cluster/task settings are picked up from the TF_CONFIG env variable;
    # worker index 1 maps to the second worker host.
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
        },
        "task": {
            "type": run_config_lib.TaskType.WORKER,
            "index": 1
        }
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config_lib.RunConfig()
    self.assertEqual(config.master, "grpc://host4:4")
    self.assertEqual(config.task_id, 1)
    self.assertEqual(config.num_ps_replicas, 2)
    self.assertEqual(config.num_worker_replicas, 3)
    self.assertEqual(config.cluster_spec.as_dict(), tf_config["cluster"])
    self.assertEqual(config.task_type, run_config_lib.TaskType.WORKER)
    self.assertFalse(config.is_chief)
    self.assertEqual(config.evaluation_master, "")
def test_explicitly_specified_values(self):
cluster_spec = {
run_config_lib.TaskType.PS: ["localhost:9990"],
"my_job_name": ["localhost:9991", "localhost:9992", "localhost:0"]
}
tf_config = {
"cluster": cluster_spec,
"task": {
"type": run_config_lib.TaskType.WORKER,
"index": 2
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master="localhost:0", evaluation_master="localhost:9991")
self.assertEqual(config.master, "localhost:0")
self.assertEqual(config.task_id, 2)
self.assertEqual(config.num_ps_replicas, 1)
self.assertEqual(config.num_worker_replicas, 0)
self.assertEqual(config.cluster_spec, server_lib.ClusterSpec(cluster_spec))
self.assertEqual(config.task_type, run_config_lib.TaskType.WORKER)
self.assertFalse(config.is_chief)
self.assertEqual(config.evaluation_master, "localhost:9991")
def test_single_node_in_cluster_spec_produces_empty_master(self):
tf_config = {"cluster": {run_config_lib.TaskType.WORKER: ["host1:1"]}}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(config.master, "")
def test_no_task_type_produces_empty_master(self):
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
},
# Omits "task": {"type": "worker}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(config.master, "")
def test_invalid_job_name_raises(self):
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
},
"task": {
"type": "not_in_cluster_spec"
}
}
expected_msg_regexp = "not_in_cluster_spec is not a valid task"
with patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), self.assertRaisesRegexp(
ValueError, expected_msg_regexp):
run_config_lib.RunConfig()
def test_illegal_task_index_raises(self):
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
},
"task": {
"type": run_config_lib.TaskType.WORKER,
"index": 3
}
}
expected_msg_regexp = "3 is not a valid task_id"
with patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), self.assertRaisesRegexp(
ValueError, expected_msg_regexp):
run_config_lib.RunConfig()
def test_is_chief_from_cloud_tf_config(self):
# is_chief should be true when ["task"]["type"] == "master" and
# index == 0 and ["task"]["environment"] == "cloud". Note that
# test_values_from_tf_config covers the non-master case.
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.MASTER: ["host3:3"],
run_config_lib.TaskType.WORKER: ["host4:4", "host5:5", "host6:6"]
},
"task": {
"type": run_config_lib.TaskType.MASTER,
"index": 0
},
"environment": "cloud"
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertTrue(config.is_chief)
def test_is_chief_from_noncloud_tf_config(self):
# is_chief should be true when ["task"]["type"] == "worker" and
# index == 0 if ["task"]["environment"] != "cloud".
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.MASTER: ["host3:3"],
run_config_lib.TaskType.WORKER: ["host4:4", "host5:5", "host6:6"]
},
"task": {
"type": run_config_lib.TaskType.WORKER,
"index": 0
},
"environment": "random"
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertTrue(config.is_chief)
# But task 0 for a job named "master" should not be.
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.MASTER: ["host3:3"],
run_config_lib.TaskType.WORKER: ["host4:4", "host5:5", "host6:6"]
},
"task": {
"type": run_config_lib.TaskType.MASTER,
"index": 0
},
"environment": "random"
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertFalse(config.is_chief)
def test_default_is_chief_from_tf_config_without_job_name(self):
tf_config = {"cluster": {}, "task": {}}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertTrue(config.is_chief)
def test_model_dir(self):
empty_config = run_config_lib.RunConfig()
self.assertIsNone(empty_config.model_dir)
config = run_config_lib.RunConfig(model_dir=TEST_DIR)
self.assertEqual(TEST_DIR, config.model_dir)
def test_model_dir_in_tf_config(self):
tf_config = {"model_dir": TEST_DIR}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
run_config = run_config_lib.RunConfig()
self.assertEqual(TEST_DIR, run_config.model_dir)
def test_model_dir_both_in_tf_config_and_constructor(self):
tf_config = {"model_dir": TEST_DIR}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
run_config = run_config_lib.RunConfig(model_dir=TEST_DIR)
self.assertEqual(TEST_DIR, run_config.model_dir)
def test_model_dir_fail_if_constructor_value_mismatch_tf_config(self):
tf_config = {"model_dir": TEST_DIR}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
with self.assertRaisesRegexp(
ValueError,
"`model_dir` provided in RunConfig .* must have "
"the same value .* in TF_CONFIG"):
run_config_lib.RunConfig(model_dir=TEST_DIR + "/sub_dir")
def test_replace(self):
config = run_config_lib.RunConfig(
tf_random_seed=RANDOM_SEED, model_dir=TEST_DIR)
self.assertEqual(TEST_DIR, config.model_dir)
self.assertEqual(RANDOM_SEED, config.tf_random_seed)
new_config = config.replace(model_dir=ANOTHER_TEST_DIR)
self.assertEqual(ANOTHER_TEST_DIR, new_config.model_dir)
self.assertEqual(RANDOM_SEED, new_config.tf_random_seed)
self.assertEqual(RANDOM_SEED, config.tf_random_seed)
def test_uid_for_different_configs(self):
config = run_config_lib.RunConfig(
tf_random_seed=RANDOM_SEED, model_dir=TEST_DIR)
expected_uid = config.uid()
# Check for 10 times, which should prove something.
for _ in range(10):
self.assertEqual(expected_uid, config.uid())
new_config = config.replace(model_dir=ANOTHER_TEST_DIR)
self.assertEqual(TEST_DIR, config.model_dir)
self.assertNotEqual(expected_uid, new_config.uid())
self.assertEqual(ANOTHER_TEST_DIR, new_config.model_dir)
def test_uid_for_whitelist(self):
whitelist = ["model_dir"]
config = run_config_lib.RunConfig(
tf_random_seed=RANDOM_SEED, model_dir=TEST_DIR)
expected_uid = config.uid(whitelist)
self.assertEqual(expected_uid, config.uid(whitelist))
new_config = config.replace(model_dir=ANOTHER_TEST_DIR)
self.assertEqual(TEST_DIR, config.model_dir)
self.assertEqual(expected_uid, new_config.uid(whitelist))
self.assertEqual(ANOTHER_TEST_DIR, new_config.model_dir)
def test_uid_for_default_whitelist(self):
config = run_config_lib.RunConfig(
tf_random_seed=11,
save_summary_steps=12,
save_checkpoints_steps=13,
save_checkpoints_secs=14,
session_config=config_pb2.ConfigProto(allow_soft_placement=True),
keep_checkpoint_max=16,
keep_checkpoint_every_n_hours=17)
self.assertEqual(11, config.tf_random_seed)
self.assertEqual(12, config.save_summary_steps)
self.assertEqual(13, config.save_checkpoints_steps)
self.assertEqual(14, config.save_checkpoints_secs)
self.assertEqual(config_pb2.ConfigProto(allow_soft_placement=True),
config.session_config)
self.assertEqual(16, config.keep_checkpoint_max)
self.assertEqual(17, config.keep_checkpoint_every_n_hours)
new_config = run_config_lib.RunConfig(
tf_random_seed=21,
save_summary_steps=22,
save_checkpoints_steps=23,
save_checkpoints_secs=24,
session_config=config_pb2.ConfigProto(allow_soft_placement=False),
keep_checkpoint_max=26,
keep_checkpoint_every_n_hours=27)
self.assertEqual(config.uid(), new_config.uid())
# model_dir is not on the default whitelist.
self.assertNotEqual(config.uid(whitelist=[]),
new_config.uid(whitelist=[]))
new_config = new_config.replace(model_dir=ANOTHER_TEST_DIR)
self.assertNotEqual(config.uid(), new_config.uid())
def test_uid_for_deepcopy(self):
tf_config = {
"cluster": {
run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
},
"task": {
"type": run_config_lib.TaskType.WORKER,
"index": 1
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
tf_random_seed=RANDOM_SEED, model_dir=TEST_DIR)
self.assertEqual(config.cluster_spec.as_dict(), tf_config["cluster"])
config = run_config_lib.RunConfig(
tf_random_seed=RANDOM_SEED, model_dir=TEST_DIR)
expected_uid = config.uid()
new_config = copy.deepcopy(config)
self.assertEqual(expected_uid, new_config.uid())
if __name__ == "__main__":
test.main()
| apache-2.0 |
dneiter/exabgp | lib/exabgp/reactor/network/incoming.py | 6 | 1063 | from exabgp.util.errstr import errstr
from .connection import Connection
from .tcp import nagle
from .tcp import async
from .error import NetworkError
from .error import NotConnected
from exabgp.bgp.message import Notify
class Incoming (Connection):
    # Server-side (passive) BGP connection, accepted from a remote peer.
    direction = 'incoming'

    def __init__ (self, afi, peer, local, io):
        # afi: address family of the socket; peer/local: peer and local
        # addresses; io: the already-accepted socket object.
        Connection.__init__(self,afi,peer,local)
        self.logger.wire("Connection from %s" % self.peer)
        try:
            self.io = io
            # put the socket in non-blocking mode and disable Nagle so BGP
            # messages are not delayed by the kernel
            async(self.io,peer)
            nagle(self.io,peer)
        except NetworkError,exc:
            # accepting the connection failed: release the socket and
            # surface the error to the caller
            self.close()
            raise NotConnected(errstr(exc))

    # XXX: FIXME: is that code ever called ?
    def notification (self, code, subcode, message):
        # Generator that writes a NOTIFICATION message to the peer; yields
        # False while data is still pending, then True once fully sent.
        try:
            notification = Notify(code,subcode,message).message()
            for boolean in self.writer(notification):
                yield False
            # self.logger.message(self.me('>> NOTIFICATION (%d,%d,"%s")' % (notification.code,notification.subcode,notification.data)),'error')
            yield True
        except NetworkError:
            pass  # This is only used when closing a session due to unconfigured peers - so issues do not matter
| bsd-3-clause |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/ctypes/test/test_anon.py | 264 | 2051 | import unittest
from ctypes import *
class AnonTest(unittest.TestCase):
    """Exercise the ``_anonymous_`` field mechanism of ctypes types."""

    def test_anon(self):
        # Fields of an anonymous Union member are reachable directly on the
        # containing Structure, at the offset of the anonymous member.
        class Inner(Union):
            _fields_ = [("a", c_int),
                        ("b", c_int)]

        class Outer(Structure):
            _fields_ = [("x", c_int),
                        ("_", Inner),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        int_size = sizeof(c_int)
        self.assertEqual(Outer.a.offset, int_size)
        self.assertEqual(Outer.b.offset, int_size)
        # Inside the union itself, both fields start at offset zero.
        self.assertEqual(Inner.a.offset, 0)
        self.assertEqual(Inner.b.offset, 0)

    def test_anon_nonseq(self):
        # _anonymous_ must be a sequence of names, not a scalar.
        # TypeError: _anonymous_ must be a sequence
        with self.assertRaises(TypeError):
            type(Structure)("Name", (Structure,),
                            {"_fields_": [], "_anonymous_": 42})

    def test_anon_nonmember(self):
        # Every name listed in _anonymous_ must also appear in _fields_.
        # AttributeError: type object 'Name' has no attribute 'x'
        with self.assertRaises(AttributeError):
            type(Structure)("Name", (Structure,),
                            {"_fields_": [], "_anonymous_": ["x"]})

    def test_nested(self):
        # Anonymous members may themselves contain anonymous members;
        # all leaf fields are lifted onto the outermost Structure.
        class LeafStruct(Structure):
            _fields_ = [("a", c_int)]

        class MidUnion(Union):
            _fields_ = [("_", LeafStruct),
                        ("b", c_int)]
            _anonymous_ = ["_"]

        class Outer(Structure):
            _fields_ = [("x", c_int),
                        ("_", MidUnion),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        int_size = sizeof(c_int)
        self.assertEqual(Outer.x.offset, 0)
        self.assertEqual(Outer.a.offset, int_size)
        self.assertEqual(Outer.b.offset, int_size)
        self.assertEqual(Outer._.offset, int_size)
        self.assertEqual(Outer.y.offset, int_size * 2)


if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
shashkin/django-authority | docs/conf.py | 10 | 6459 | # -*- coding: utf-8 -*-
#
# django-authority documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 9 10:52:07 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
#sys.path.append(os.path.join(os.path.dirname(__file__), '../src/'))
# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
#templates_path = ['.templates']

# The suffix of source filenames.
# NOTE: this project's documentation sources use the .txt extension, not .rst.
source_suffix = '.txt'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-authority'
copyright = u'2009, the django-authority team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8'
# The full version, including alpha/beta/rc tags.
release = '0.8dev'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.theme']

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '.static/logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.png'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-authoritydoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'django-authority.tex', u'django-authority Documentation',
   u'The django-authority team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
pdxwebdev/yadapy | yada/lib/python2.7/site-packages/_pytest/junitxml.py | 2 | 13379 | """
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
"""
# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
import py
import os
import re
import sys
import time
import pytest
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
    # Python 2: codecs.open supports the encoding= keyword used below.
    from codecs import open
else:
    # Python 3: alias the Python 2 names this module still uses.
    unichr = chr
    unicode = str
    long = int
class Junit(py.xml.Namespace):
    """Tag namespace: attribute access builds JUnit XML elements via py.xml."""
    pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
    (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
)
# Build character-class ranges like "a-b"; narrow unicode builds drop the
# astral ranges (low >= sys.maxunicode).
_legal_xml_re = [
    unicode("%s-%s") % (unichr(low), unichr(high))
    for (low, high) in _legal_ranges if low < sys.maxunicode
]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
# Matches any single character that is NOT legal in XML 1.0 output.
illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re

# Strips a trailing ".py" when converting file paths to dotted module names.
_py_ext_re = re.compile(r"\.py$")
def bin_xml_escape(arg):
    """XML-escape *arg*, replacing characters illegal in XML 1.0 with a
    visible ``#xNN`` hex marker so they survive serialization."""
    def repl(matchobj):
        i = ord(matchobj.group())
        if i <= 0xFF:
            return unicode('#x%02X') % i
        else:
            return unicode('#x%04X') % i

    # escape first, then neutralize any remaining illegal characters
    return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
class _NodeReporter(object):
    """Accumulates the XML output (attributes, properties, child nodes)
    for a single test nodeid and renders it as one <testcase> element."""

    def __init__(self, nodeid, xml):
        self.id = nodeid
        self.xml = xml  # the owning LogXML instance
        self.add_stats = self.xml.add_stats
        self.duration = 0
        self.properties = []
        self.nodes = []  # child XML nodes (failure/skipped/system-out, ...)
        self.testcase = None
        self.attrs = {}

    def append(self, node):
        # Recording the node also bumps the global outcome counter named
        # after the node's class (e.g. 'failure').
        self.xml.add_stats(type(node).__name__)
        self.nodes.append(node)

    def add_property(self, name, value):
        self.properties.append((str(name), bin_xml_escape(value)))

    def make_properties_node(self):
        """Return a Junit node containing custom properties, if any.
        """
        if self.properties:
            return Junit.properties([
                Junit.property(name=name, value=value)
                for name, value in self.properties
            ])
        return ''

    def record_testreport(self, testreport):
        """Derive the <testcase> attributes (classname/name/file/line)
        from the report's nodeid and location."""
        assert not self.testcase
        names = mangle_test_address(testreport.nodeid)
        classnames = names[:-1]
        if self.xml.prefix:
            classnames.insert(0, self.xml.prefix)
        attrs = {
            "classname": ".".join(classnames),
            "name": bin_xml_escape(names[-1]),
            "file": testreport.location[0],
        }
        if testreport.location[1] is not None:
            attrs["line"] = testreport.location[1]
        self.attrs = attrs

    def to_xml(self):
        """Render the accumulated data as a <testcase> element."""
        testcase = Junit.testcase(time=self.duration, **self.attrs)
        testcase.append(self.make_properties_node())
        for node in self.nodes:
            testcase.append(node)
        return testcase

    def _add_simple(self, kind, message, data=None):
        # Append a single child node of the given tag with escaped text.
        data = bin_xml_escape(data)
        node = kind(data, message=message)
        self.append(node)

    def _write_captured_output(self, report):
        # Emit captured stdout/stderr as <system-out>/<system-err>.
        for capname in ('out', 'err'):
            content = getattr(report, 'capstd' + capname)
            if content:
                tag = getattr(Junit, 'system-' + capname)
                self.append(tag(bin_xml_escape(content)))

    def append_pass(self, report):
        self.add_stats('passed')
        self._write_captured_output(report)

    def append_failure(self, report):
        # msg = str(report.longrepr.reprtraceback.extraline)
        if hasattr(report, "wasxfail"):
            # XPASS is reported as skipped rather than failure
            self._add_simple(
                Junit.skipped,
                "xfail-marked test passes unexpectedly")
        else:
            # longrepr may be a full repr object, a plain string, or
            # anything else stringifiable
            if hasattr(report.longrepr, "reprcrash"):
                message = report.longrepr.reprcrash.message
            elif isinstance(report.longrepr, (unicode, str)):
                message = report.longrepr
            else:
                message = str(report.longrepr)
            message = bin_xml_escape(message)
            fail = Junit.failure(message=message)
            fail.append(bin_xml_escape(report.longrepr))
            self.append(fail)
        self._write_captured_output(report)

    def append_collect_error(self, report):
        # msg = str(report.longrepr.reprtraceback.extraline)
        self.append(Junit.error(bin_xml_escape(report.longrepr),
                                message="collection failure"))

    def append_collect_skipped(self, report):
        self._add_simple(
            Junit.skipped, "collection skipped", report.longrepr)

    def append_error(self, report):
        self._add_simple(
            Junit.error, "test setup failure", report.longrepr)
        self._write_captured_output(report)

    def append_skipped(self, report):
        if hasattr(report, "wasxfail"):
            self._add_simple(
                Junit.skipped, "expected test failure", report.wasxfail
            )
        else:
            filename, lineno, skipreason = report.longrepr
            if skipreason.startswith("Skipped: "):
                # drop the redundant "Skipped: " prefix
                skipreason = bin_xml_escape(skipreason[9:])
            self.append(
                Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
                              type="pytest.skip",
                              message=skipreason))
        self._write_captured_output(report)

    def finalize(self):
        data = self.to_xml().unicode(indent=0)
        # Free the accumulated report data; replace to_xml with a closure
        # returning the pre-rendered XML so late callers still work.
        self.__dict__.clear()
        self.to_xml = lambda: py.xml.raw(data)
@pytest.fixture
def record_xml_property(request):
    """Add extra xml properties to the tag for the calling test.

    The fixture is callable with ``(name, value)``, with value being
    automatically xml-encoded.
    """
    request.node.warn(
        code='C3',
        message='record_xml_property is an experimental feature',
    )
    xml = getattr(request.config, "_xml", None)
    if xml is not None:
        node_reporter = xml.node_reporter(request.node.nodeid)
        return node_reporter.add_property
    else:
        # xml logging disabled (no --junitxml): hand back a no-op recorder
        def add_property_noop(name, value):
            pass

        return add_property_noop
def pytest_addoption(parser):
    """Register the --junitxml/--junit-prefix command line options."""
    group = parser.getgroup("terminal reporting")
    group.addoption(
        '--junitxml', '--junit-xml',
        action="store",
        dest="xmlpath",
        metavar="path",
        default=None,
        help="create junit-xml style report file at given path.")
    group.addoption(
        '--junitprefix', '--junit-prefix',
        action="store",
        metavar="str",
        default=None,
        help="prepend prefix to classnames in junit-xml output")
def pytest_configure(config):
    """Create and register the LogXML plugin when --junitxml is given."""
    xmlpath = config.option.xmlpath
    # prevent opening xmllog on slave nodes (xdist)
    if xmlpath and not hasattr(config, 'slaveinput'):
        config._xml = LogXML(xmlpath, config.option.junitprefix)
        config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
    """Unregister the LogXML plugin at the end of the session."""
    xml = getattr(config, '_xml', None)
    if xml:
        del config._xml
        config.pluginmanager.unregister(xml)
def mangle_test_address(address):
    """Turn a pytest nodeid into ``[dotted.module, <classes...>, testname]``.

    Any parametrization id (``[...]``) is preserved on the final name.
    """
    base, bracket, params = address.partition('[')
    parts = base.split("::")
    # instance markers ('()') carry no information for the junit classname
    if '()' in parts:
        parts.remove('()')
    # file path -> dotted module path, without the .py suffix
    parts[0] = re.sub(r"\.py$", "", parts[0].replace("/", "."))
    # re-attach any parametrization id to the test name
    parts[-1] += bracket + params
    return parts
class LogXML(object):
    """pytest plugin object that collects per-test reports and writes the
    final JUnit-XML file at session end."""

    def __init__(self, logfile, prefix):
        # prefix: optional string prepended to every testcase classname
        logfile = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(os.path.abspath(logfile))
        self.prefix = prefix
        self.stats = dict.fromkeys([
            'error',
            'passed',
            'failure',
            'skipped',
        ], 0)
        self.node_reporters = {}  # nodeid -> _NodeReporter
        self.node_reporters_ordered = []
        self.global_properties = []

    def finalize(self, report):
        """Render and release the reporter for the report's nodeid."""
        nodeid = getattr(report, 'nodeid', report)
        # local hack to handle xdist report order
        slavenode = getattr(report, 'node', None)
        reporter = self.node_reporters.pop((nodeid, slavenode))
        if reporter is not None:
            reporter.finalize()

    def node_reporter(self, report):
        """Return the (possibly new) _NodeReporter for the report's nodeid."""
        nodeid = getattr(report, 'nodeid', report)
        # local hack to handle xdist report order
        slavenode = getattr(report, 'node', None)

        key = nodeid, slavenode

        if key in self.node_reporters:
            # TODO: breaks for --dist=each
            return self.node_reporters[key]

        reporter = _NodeReporter(nodeid, self)

        self.node_reporters[key] = reporter
        self.node_reporters_ordered.append(reporter)

        return reporter

    def add_stats(self, key):
        # Only the four known outcome categories are counted.
        if key in self.stats:
            self.stats[key] += 1

    def _opentestcase(self, report):
        # Create/fetch the reporter and record the testcase attributes.
        reporter = self.node_reporter(report)
        reporter.record_testreport(report)
        return reporter

    def pytest_runtest_logreport(self, report):
        """handle a setup/call/teardown report, generating the appropriate
        xml tags as necessary.

        note: due to plugins like xdist, this hook may be called in interlaced
        order with reports from other nodes. for example:

        usual call order:
            -> setup node1
            -> call node1
            -> teardown node1
            -> setup node2
            -> call node2
            -> teardown node2

        possible call order in xdist:
            -> setup node1
            -> call node1
            -> setup node2
            -> call node2
            -> teardown node2
            -> teardown node1
        """
        if report.passed:
            if report.when == "call":  # ignore setup/teardown
                reporter = self._opentestcase(report)
                reporter.append_pass(report)
        elif report.failed:
            reporter = self._opentestcase(report)
            if report.when == "call":
                reporter.append_failure(report)
            else:
                # setup/teardown failures are errors, not test failures
                reporter.append_error(report)
        elif report.skipped:
            reporter = self._opentestcase(report)
            reporter.append_skipped(report)
        self.update_testcase_duration(report)
        if report.when == "teardown":
            self.finalize(report)

    def update_testcase_duration(self, report):
        """accumulates total duration for nodeid from given report and updates
        the Junit.testcase with the new total if already created.
        """
        reporter = self.node_reporter(report)
        reporter.duration += getattr(report, 'duration', 0.0)

    def pytest_collectreport(self, report):
        # Collection problems are recorded as errors or skips.
        if not report.passed:
            reporter = self._opentestcase(report)
            if report.failed:
                reporter.append_collect_error(report)
            else:
                reporter.append_collect_skipped(report)

    def pytest_internalerror(self, excrepr):
        # Internal pytest errors get a synthetic 'internal' testcase.
        reporter = self.node_reporter('internal')
        reporter.attrs.update(classname="pytest", name='internal')
        reporter._add_simple(Junit.error, 'internal error', excrepr)

    def pytest_sessionstart(self):
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self):
        """Write the accumulated <testsuite> document to self.logfile."""
        dirname = os.path.dirname(os.path.abspath(self.logfile))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        logfile = open(self.logfile, 'w', encoding='utf-8')
        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time

        numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error']

        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
        logfile.write(Junit.testsuite(
            self._get_global_properties_node(),
            [x.to_xml() for x in self.node_reporters_ordered],
            name="pytest",
            errors=self.stats['error'],
            failures=self.stats['failure'],
            skips=self.stats['skipped'],
            tests=numtests,
            time="%.3f" % suite_time_delta, ).unicode(indent=0))
        logfile.close()

    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-",
                                   "generated xml file: %s" % (self.logfile))

    def add_global_property(self, name, value):
        self.global_properties.append((str(name), bin_xml_escape(value)))

    def _get_global_properties_node(self):
        """Return a Junit node containing custom properties, if any.
        """
        if self.global_properties:
            return Junit.properties(
                [
                    Junit.property(name=name, value=value)
                    for name, value in self.global_properties
                ]
            )
        return ''
| gpl-3.0 |
elkingtonmcb/nupic | tests/swarming/nupic/swarming/experiments/dummy_multi_v2/description.py | 32 | 15364 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    # NOTE: all period fields are 0, so aggregation is effectively disabled
    # here; 'fields' documents how each column would be rolled up if enabled.
    'aggregationInfo': { 'days': 0,
                         'fields': [ (u'timestamp', 'first'),
                                     (u'gym', 'first'),
                                     (u'consumption', 'mean'),
                                     (u'address', 'first')],
                         'hours': 0,
                         'microseconds': 0,
                         'milliseconds': 0,
                         'minutes': 0,
                         'months': 0,
                         'seconds': 0,
                         'weeks': 0,
                         'years': 0},
    # How far ahead to predict; None means "predict the next record".
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalNextStep',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': { 'address': { 'fieldname': u'address',
                                       'n': 300,
                                       'name': u'address',
                                       'type': 'SDRCategoryEncoder',
                                       'w': 21},
                          'consumption': { 'clipInput': True,
                                           'fieldname': u'consumption',
                                           'maxval': 200,
                                           'minval': 0,
                                           'n': 1500,
                                           'name': u'consumption',
                                           'type': 'ScalarEncoder',
                                           'w': 21},
                          'gym': { 'fieldname': u'gym',
                                   'n': 300,
                                   'name': u'gym',
                                   'type': 'SDRCategoryEncoder',
                                   'w': 21},
                          'timestamp_dayOfWeek': { 'dayOfWeek': (7, 3),
                                                   'fieldname': u'timestamp',
                                                   'name': u'timestamp_dayOfWeek',
                                                   'type': 'DateEncoder'},
                          'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                   'name': u'timestamp_timeOfDay',
                                                   'timeOfDay': (7, 8),
                                                   'type': 'DateEncoder'}},
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TP and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable' : True,
        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 15,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            'regionName' : 'CLAClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'clVerbosity' : 0,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Merge any overrides supplied by a sub-experiment into this base config.
updateConfigFromSubConfig(config)

# Derive the classifier step count from predictAheadTime (which may be
# permuted over) and the aggregation period.
if config['predictAheadTime'] is not None:
    predictionSteps = int(round(aggregationDivide(config['predictAheadTime'],
                                                  config['aggregationInfo'])))
    assert predictionSteps >= 1
    config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Resolve ValueGetterBase-derived futures. NOTE: this MUST run after
# updateConfigFromSubConfig() so value-getter substitutions from the
# sub-experiment (if any) are honored.
applyValueGettersToContainer(config)
control = {
    # The environment that the current model is being run in
    "environment": 'nupic',
    # Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
    #
    'dataset' : {u'info': u'test_NoProviders',
                 u'streams': [ { u'columns': [u'*'],
                                 u'info': u'test data',
                                 u'source': u'file://swarming/test_data.csv'}],
                 u'version': 1},
    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    #'iterationCount' : ITERATION_COUNT,
    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics':[
        MetricSpec(field=u'consumption',
                   inferenceElement=InferenceElement.prediction,
                   metric='rmse'),
    ],
    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regex's correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': [".*nupicScore.*"],
}
# Build the experiment-description object consumed by the OPF framework.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| agpl-3.0 |
kyonetca/potcoin | contrib/pyminer/pyminer.py | 6 | 6435 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after a failed/malformed getwork before retrying.
ERR_SLEEP = 15
# Initial upper bound on nonces scanned per work unit; retuned per thread.
MAX_NONCE = 1000000L
# Global configuration key/value pairs parsed from the config file.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
	"""Minimal JSON-RPC 1.1 client for talking to a bitcoind-style daemon."""
	# JSON-RPC request id; incremented per call (becomes an instance
	# attribute after the first increment).
	OBJID = 1
	def __init__(self, host, port, username, password):
		"""Open an HTTP connection and precompute the Basic-auth header."""
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# Non-strict connection with a 30-second timeout; reused for
		# every request made through this object.
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		"""POST one JSON-RPC call.

		Returns the call's 'result' on success, the daemon's 'error'
		object on an RPC-level error, or None on transport/decode
		failures (which are reported to stdout).
		"""
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None
		return resp_obj['result']
	def getblockcount(self):
		"""Return the daemon's current block height."""
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		"""Fetch new work (data=None) or submit a solved header (data set)."""
		return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
	"""Swap the byte order (endianness) of the 32-bit word x."""
	upper = (x << 24) | ((x << 8) & 0x00ff0000)
	lower = ((x >> 8) & 0x0000ff00) | (x >> 24)
	return uint32(upper | lower)
def bufreverse(in_buf):
	"""Byte-swap each 32-bit word of in_buf (length must be a multiple of 4)."""
	return ''.join(
		struct.pack('@I', bytereverse(struct.unpack('@I', in_buf[i:i + 4])[0]))
		for i in range(0, len(in_buf), 4))
def wordreverse(in_buf):
	"""Reverse the order of the 32-bit words in in_buf (bytes untouched)."""
	words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
	return ''.join(reversed(words))
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
	"""Worker-process entry point: run one mining loop forever."""
	Miner(id).loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)
	# Parse simple "key = value" lines from the config file into the
	# global settings dict; lines starting with '#' are comments.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()
	# Fill in defaults for optional settings.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 42000
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)
	# Normalize types: every value read from the file is a string.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])
	# Spawn one mining process per configured thread and wait on them.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads
	print settings['threads'], "mining threads started"
	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
mancoast/CPythonPyc_test | cpython/270_test_bool.py | 20 | 14185 | # Test properties of bool promised by PEP 285
import unittest
from test import test_support
import os
class BoolTest(unittest.TestCase):
    """Verify the properties of the bool type promised by PEP 285:
    bool is a non-subclassable int subclass whose two instances (True,
    False) are returned by comparisons, built-ins and library calls,
    and which behave as 1 and 0 in arithmetic contexts.
    """
    def test_subclass(self):
        try:
            class C(bool):
                pass
        except TypeError:
            pass
        else:
            self.fail("bool should not be subclassable")
        self.assertRaises(TypeError, int.__new__, bool, 0)
    def test_print(self):
        try:
            fo = open(test_support.TESTFN, "wb")
            print >> fo, False, True
            fo.close()
            fo = open(test_support.TESTFN, "rb")
            self.assertEqual(fo.read(), 'False True\n')
        finally:
            fo.close()
            os.remove(test_support.TESTFN)
    def test_repr(self):
        self.assertEqual(repr(False), 'False')
        self.assertEqual(repr(True), 'True')
        self.assertEqual(eval(repr(False)), False)
        self.assertEqual(eval(repr(True)), True)
    def test_str(self):
        self.assertEqual(str(False), 'False')
        self.assertEqual(str(True), 'True')
    def test_int(self):
        self.assertEqual(int(False), 0)
        self.assertIsNot(int(False), False)
        self.assertEqual(int(True), 1)
        self.assertIsNot(int(True), True)
    def test_float(self):
        self.assertEqual(float(False), 0.0)
        self.assertIsNot(float(False), False)
        self.assertEqual(float(True), 1.0)
        self.assertIsNot(float(True), True)
    def test_long(self):
        self.assertEqual(long(False), 0L)
        self.assertIsNot(long(False), False)
        self.assertEqual(long(True), 1L)
        self.assertIsNot(long(True), True)
    def test_math(self):
        # Arithmetic on bools yields plain ints, never bool instances.
        self.assertEqual(+False, 0)
        self.assertIsNot(+False, False)
        self.assertEqual(-False, 0)
        self.assertIsNot(-False, False)
        self.assertEqual(abs(False), 0)
        self.assertIsNot(abs(False), False)
        self.assertEqual(+True, 1)
        self.assertIsNot(+True, True)
        self.assertEqual(-True, -1)
        self.assertEqual(abs(True), 1)
        self.assertIsNot(abs(True), True)
        self.assertEqual(~False, -1)
        self.assertEqual(~True, -2)
        self.assertEqual(False+2, 2)
        self.assertEqual(True+2, 3)
        self.assertEqual(2+False, 2)
        self.assertEqual(2+True, 3)
        self.assertEqual(False+False, 0)
        self.assertIsNot(False+False, False)
        self.assertEqual(False+True, 1)
        self.assertIsNot(False+True, True)
        self.assertEqual(True+False, 1)
        self.assertIsNot(True+False, True)
        self.assertEqual(True+True, 2)
        self.assertEqual(True-True, 0)
        self.assertIsNot(True-True, False)
        self.assertEqual(False-False, 0)
        self.assertIsNot(False-False, False)
        self.assertEqual(True-False, 1)
        self.assertIsNot(True-False, True)
        self.assertEqual(False-True, -1)
        self.assertEqual(True*1, 1)
        self.assertEqual(False*1, 0)
        self.assertIsNot(False*1, False)
        self.assertEqual(True//1, 1)
        self.assertIsNot(True//1, True)
        self.assertEqual(False//1, 0)
        self.assertIsNot(False//1, False)
        for b in False, True:
            for i in 0, 1, 2:
                self.assertEqual(b**i, int(b)**i)
                self.assertIsNot(b**i, bool(int(b)**i))
        # Bitwise ops between two bools stay bool; mixing with int
        # degrades to int.
        for a in False, True:
            for b in False, True:
                self.assertIs(a&b, bool(int(a)&int(b)))
                self.assertIs(a|b, bool(int(a)|int(b)))
                self.assertIs(a^b, bool(int(a)^int(b)))
                self.assertEqual(a&int(b), int(a)&int(b))
                self.assertIsNot(a&int(b), bool(int(a)&int(b)))
                self.assertEqual(a|int(b), int(a)|int(b))
                self.assertIsNot(a|int(b), bool(int(a)|int(b)))
                self.assertEqual(a^int(b), int(a)^int(b))
                self.assertIsNot(a^int(b), bool(int(a)^int(b)))
                self.assertEqual(int(a)&b, int(a)&int(b))
                self.assertIsNot(int(a)&b, bool(int(a)&int(b)))
                self.assertEqual(int(a)|b, int(a)|int(b))
                self.assertIsNot(int(a)|b, bool(int(a)|int(b)))
                self.assertEqual(int(a)^b, int(a)^int(b))
                self.assertIsNot(int(a)^b, bool(int(a)^int(b)))
        self.assertIs(1==1, True)
        self.assertIs(1==0, False)
        self.assertIs(0<1, True)
        self.assertIs(1<0, False)
        self.assertIs(0<=0, True)
        self.assertIs(1<=0, False)
        self.assertIs(1>0, True)
        self.assertIs(1>1, False)
        self.assertIs(1>=1, True)
        self.assertIs(0>=1, False)
        self.assertIs(0!=1, True)
        self.assertIs(0!=0, False)
        x = [1]
        self.assertIs(x is x, True)
        self.assertIs(x is not x, False)
        self.assertIs(1 in x, True)
        self.assertIs(0 in x, False)
        self.assertIs(1 not in x, False)
        self.assertIs(0 not in x, True)
        x = {1: 2}
        self.assertIs(x is x, True)
        self.assertIs(x is not x, False)
        self.assertIs(1 in x, True)
        self.assertIs(0 in x, False)
        self.assertIs(1 not in x, False)
        self.assertIs(0 not in x, True)
        self.assertIs(not True, False)
        self.assertIs(not False, True)
    def test_convert(self):
        self.assertRaises(TypeError, bool, 42, 42)
        self.assertIs(bool(10), True)
        self.assertIs(bool(1), True)
        self.assertIs(bool(-1), True)
        self.assertIs(bool(0), False)
        self.assertIs(bool("hello"), True)
        self.assertIs(bool(""), False)
        self.assertIs(bool(), False)
    def test_format(self):
        self.assertEqual("%d" % False, "0")
        self.assertEqual("%d" % True, "1")
        self.assertEqual("%x" % False, "0")
        self.assertEqual("%x" % True, "1")
    def test_hasattr(self):
        self.assertIs(hasattr([], "append"), True)
        self.assertIs(hasattr([], "wobble"), False)
    def test_callable(self):
        with test_support.check_py3k_warnings():
            self.assertIs(callable(len), True)
        self.assertIs(callable(1), False)
    def test_isinstance(self):
        self.assertIs(isinstance(True, bool), True)
        self.assertIs(isinstance(False, bool), True)
        self.assertIs(isinstance(True, int), True)
        self.assertIs(isinstance(False, int), True)
        self.assertIs(isinstance(1, bool), False)
        self.assertIs(isinstance(0, bool), False)
    def test_issubclass(self):
        self.assertIs(issubclass(bool, int), True)
        self.assertIs(issubclass(int, bool), False)
    def test_haskey(self):
        self.assertIs(1 in {}, False)
        self.assertIs(1 in {1:1}, True)
        with test_support.check_py3k_warnings():
            self.assertIs({}.has_key(1), False)
            self.assertIs({1:1}.has_key(1), True)
    def test_string(self):
        # String predicate methods must return real bools.
        self.assertIs("xyz".endswith("z"), True)
        self.assertIs("xyz".endswith("x"), False)
        self.assertIs("xyz0123".isalnum(), True)
        self.assertIs("@#$%".isalnum(), False)
        self.assertIs("xyz".isalpha(), True)
        self.assertIs("@#$%".isalpha(), False)
        self.assertIs("0123".isdigit(), True)
        self.assertIs("xyz".isdigit(), False)
        self.assertIs("xyz".islower(), True)
        self.assertIs("XYZ".islower(), False)
        self.assertIs(" ".isspace(), True)
        self.assertIs("XYZ".isspace(), False)
        self.assertIs("X".istitle(), True)
        self.assertIs("x".istitle(), False)
        self.assertIs("XYZ".isupper(), True)
        self.assertIs("xyz".isupper(), False)
        self.assertIs("xyz".startswith("x"), True)
        self.assertIs("xyz".startswith("z"), False)
        if test_support.have_unicode:
            self.assertIs(unicode("xyz", 'ascii').endswith(unicode("z", 'ascii')), True)
            self.assertIs(unicode("xyz", 'ascii').endswith(unicode("x", 'ascii')), False)
            self.assertIs(unicode("xyz0123", 'ascii').isalnum(), True)
            self.assertIs(unicode("@#$%", 'ascii').isalnum(), False)
            self.assertIs(unicode("xyz", 'ascii').isalpha(), True)
            self.assertIs(unicode("@#$%", 'ascii').isalpha(), False)
            self.assertIs(unicode("0123", 'ascii').isdecimal(), True)
            self.assertIs(unicode("xyz", 'ascii').isdecimal(), False)
            self.assertIs(unicode("0123", 'ascii').isdigit(), True)
            self.assertIs(unicode("xyz", 'ascii').isdigit(), False)
            self.assertIs(unicode("xyz", 'ascii').islower(), True)
            self.assertIs(unicode("XYZ", 'ascii').islower(), False)
            self.assertIs(unicode("0123", 'ascii').isnumeric(), True)
            self.assertIs(unicode("xyz", 'ascii').isnumeric(), False)
            self.assertIs(unicode(" ", 'ascii').isspace(), True)
            self.assertIs(unicode("XYZ", 'ascii').isspace(), False)
            self.assertIs(unicode("X", 'ascii').istitle(), True)
            self.assertIs(unicode("x", 'ascii').istitle(), False)
            self.assertIs(unicode("XYZ", 'ascii').isupper(), True)
            self.assertIs(unicode("xyz", 'ascii').isupper(), False)
            self.assertIs(unicode("xyz", 'ascii').startswith(unicode("x", 'ascii')), True)
            self.assertIs(unicode("xyz", 'ascii').startswith(unicode("z", 'ascii')), False)
    def test_boolean(self):
        self.assertEqual(True & 1, 1)
        self.assertNotIsInstance(True & 1, bool)
        self.assertIs(True & True, True)
        self.assertEqual(True | 1, 1)
        self.assertNotIsInstance(True | 1, bool)
        self.assertIs(True | True, True)
        self.assertEqual(True ^ 1, 0)
        self.assertNotIsInstance(True ^ 1, bool)
        self.assertIs(True ^ True, False)
    def test_fileclosed(self):
        try:
            f = file(test_support.TESTFN, "w")
            self.assertIs(f.closed, False)
            f.close()
            self.assertIs(f.closed, True)
        finally:
            os.remove(test_support.TESTFN)
    def test_types(self):
        # types are always true.
        for t in [bool, complex, dict, file, float, int, list, long, object,
                  set, str, tuple, type]:
            self.assertIs(bool(t), True)
    def test_operator(self):
        import operator
        self.assertIs(operator.truth(0), False)
        self.assertIs(operator.truth(1), True)
        with test_support.check_py3k_warnings():
            self.assertIs(operator.isCallable(0), False)
            self.assertIs(operator.isCallable(len), True)
        self.assertIs(operator.isNumberType(None), False)
        self.assertIs(operator.isNumberType(0), True)
        self.assertIs(operator.not_(1), False)
        self.assertIs(operator.not_(0), True)
        self.assertIs(operator.isSequenceType(0), False)
        self.assertIs(operator.isSequenceType([]), True)
        self.assertIs(operator.contains([], 1), False)
        self.assertIs(operator.contains([1], 1), True)
        self.assertIs(operator.isMappingType(1), False)
        self.assertIs(operator.isMappingType({}), True)
        self.assertIs(operator.lt(0, 0), False)
        self.assertIs(operator.lt(0, 1), True)
        self.assertIs(operator.is_(True, True), True)
        self.assertIs(operator.is_(True, False), False)
        self.assertIs(operator.is_not(True, True), False)
        self.assertIs(operator.is_not(True, False), True)
    def test_marshal(self):
        import marshal
        self.assertIs(marshal.loads(marshal.dumps(True)), True)
        self.assertIs(marshal.loads(marshal.dumps(False)), False)
    def test_pickle(self):
        import pickle
        self.assertIs(pickle.loads(pickle.dumps(True)), True)
        self.assertIs(pickle.loads(pickle.dumps(False)), False)
        self.assertIs(pickle.loads(pickle.dumps(True, True)), True)
        self.assertIs(pickle.loads(pickle.dumps(False, True)), False)
    def test_cpickle(self):
        import cPickle
        self.assertIs(cPickle.loads(cPickle.dumps(True)), True)
        self.assertIs(cPickle.loads(cPickle.dumps(False)), False)
        self.assertIs(cPickle.loads(cPickle.dumps(True, True)), True)
        self.assertIs(cPickle.loads(cPickle.dumps(False, True)), False)
    def test_mixedpickle(self):
        import pickle, cPickle
        self.assertIs(pickle.loads(cPickle.dumps(True)), True)
        self.assertIs(pickle.loads(cPickle.dumps(False)), False)
        self.assertIs(pickle.loads(cPickle.dumps(True, True)), True)
        self.assertIs(pickle.loads(cPickle.dumps(False, True)), False)
        self.assertIs(cPickle.loads(pickle.dumps(True)), True)
        self.assertIs(cPickle.loads(pickle.dumps(False)), False)
        self.assertIs(cPickle.loads(pickle.dumps(True, True)), True)
        self.assertIs(cPickle.loads(pickle.dumps(False, True)), False)
    def test_picklevalues(self):
        import pickle, cPickle
        # Test for specific backwards-compatible pickle values
        self.assertEqual(pickle.dumps(True), "I01\n.")
        self.assertEqual(pickle.dumps(False), "I00\n.")
        self.assertEqual(cPickle.dumps(True), "I01\n.")
        self.assertEqual(cPickle.dumps(False), "I00\n.")
        self.assertEqual(pickle.dumps(True, True), "I01\n.")
        self.assertEqual(pickle.dumps(False, True), "I00\n.")
        self.assertEqual(cPickle.dumps(True, True), "I01\n.")
        self.assertEqual(cPickle.dumps(False, True), "I00\n.")
    def test_convert_to_bool(self):
        # Verify that TypeError occurs when bad things are returned
        # from __nonzero__(). This isn't really a bool test, but
        # it's related.
        check = lambda o: self.assertRaises(TypeError, bool, o)
        class Foo(object):
            def __nonzero__(self):
                return self
        check(Foo())
        class Bar(object):
            def __nonzero__(self):
                return "Yes"
        check(Bar())
        class Baz(int):
            def __nonzero__(self):
                return self
        check(Baz())
def test_main():
    """Entry point used by regrtest to run the BoolTest suite."""
    test_support.run_unittest(BoolTest)
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
apporc/cinder | cinder/volume/drivers/huawei/huawei_driver.py | 3 | 55022 | # Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import fc_zone_helper
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import hypermetro
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
# Driver-specific configuration options; registered with oslo.config below so
# they can be set in cinder.conf (globally or per backend section).
huawei_opts = [
    cfg.StrOpt('cinder_huawei_conf_file',
               default='/etc/cinder/cinder_huawei_conf.xml',
               help='The configuration file for the Cinder Huawei driver.'),
    cfg.StrOpt('hypermetro_devices',
               help='The remote device hypermetro will use.'),
]
CONF = cfg.CONF
CONF.register_opts(huawei_opts)
class HuaweiBaseDriver(driver.VolumeDriver):
def __init__(self, *args, **kwargs):
super(HuaweiBaseDriver, self).__init__(*args, **kwargs)
self.configuration = kwargs.get('configuration')
if not self.configuration:
msg = _('_instantiate_driver: configuration not found.')
raise exception.InvalidInput(reason=msg)
self.configuration.append_config_values(huawei_opts)
self.xml_file_path = self.configuration.cinder_huawei_conf_file
self.hypermetro_devices = self.configuration.hypermetro_devices
def do_setup(self, context):
"""Instantiate common class and login storage system."""
self.restclient = rest_client.RestClient(self.configuration)
return self.restclient.login()
def check_for_setup_error(self):
"""Check configuration file."""
return huawei_utils.check_conf_file(self.xml_file_path)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
return self.restclient.update_volume_stats()
@utils.synchronized('huawei', external=True)
def create_volume(self, volume):
"""Create a volume."""
opts = huawei_utils.get_volume_params(volume)
smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts)
params = huawei_utils.get_lun_params(self.xml_file_path,
smartx_opts)
pool_name = volume_utils.extract_host(volume['host'],
level='pool')
pools = self.restclient.find_all_pools()
pool_info = self.restclient.find_pool_info(pool_name, pools)
if not pool_info:
# The following code is to keep compatibility with old version of
# Huawei driver.
pool_names = huawei_utils.get_pools(self.xml_file_path)
for pool_name in pool_names.split(";"):
pool_info = self.restclient.find_pool_info(pool_name,
pools)
if pool_info:
break
volume_name = huawei_utils.encode_name(volume['id'])
volume_description = volume['name']
volume_size = huawei_utils.get_volume_size(volume)
LOG.info(_LI(
'Create volume: %(volume)s, size: %(size)s.'),
{'volume': volume_name,
'size': volume_size})
params['pool_id'] = pool_info['ID']
params['volume_size'] = volume_size
params['volume_description'] = volume_description
# Prepare LUN parameters.
lun_param = huawei_utils.init_lun_parameters(volume_name, params)
# Create LUN on the array.
lun_info = self.restclient.create_volume(lun_param)
lun_id = lun_info['ID']
try:
qos = huawei_utils.get_volume_qos(volume)
if qos:
smart_qos = smartx.SmartQos(self.restclient)
smart_qos.create_qos(qos, lun_id)
smartpartition = smartx.SmartPartition(self.restclient)
smartpartition.add(opts, lun_id)
smartcache = smartx.SmartCache(self.restclient)
smartcache.add(opts, lun_id)
except Exception as err:
self._delete_lun_with_check(lun_id)
raise exception.InvalidInput(
reason=_('Create volume error. Because %s.') % err)
# Update the metadata.
LOG.info(_LI('Create volume option: %s.'), opts)
metadata = huawei_utils.get_volume_metadata(volume)
if opts.get('hypermetro'):
hyperm = hypermetro.HuaweiHyperMetro(self.restclient, None,
self.configuration)
try:
metro_id, remote_lun_id = hyperm.create_hypermetro(lun_id,
lun_param)
except exception.VolumeBackendAPIException as err:
LOG.exception(_LE('Create hypermetro error: %s.'), err)
self._delete_lun_with_check(lun_id)
raise
LOG.info(_LI("Hypermetro id: %(metro_id)s. "
"Remote lun id: %(remote_lun_id)s."),
{'metro_id': metro_id,
'remote_lun_id': remote_lun_id})
metadata.update({'hypermetro_id': metro_id,
'remote_lun_id': remote_lun_id})
return {'provider_location': lun_id,
'ID': lun_id,
'metadata': metadata}
@utils.synchronized('huawei', external=True)
def delete_volume(self, volume):
    """Delete a volume.

    Three steps:
    Firstly, remove associate from lungroup.
    Secondly, remove associate from QoS policy.
    Thirdly, remove the lun.

    Returns False when the LUN cannot be found on the array, True
    otherwise (including the no-op case where provider_location is
    unset).
    """
    name = huawei_utils.encode_name(volume['id'])
    lun_id = volume.get('provider_location')
    LOG.info(_LI('Delete volume: %(name)s, array lun id: %(lun_id)s.'),
             {'name': name, 'lun_id': lun_id},)
    if lun_id:
        if self.restclient.check_lun_exist(lun_id):
            # Detach the LUN from its QoS policy before deletion; the
            # policy itself is dropped when this was its last member.
            qos_id = self.restclient.get_qosid_by_lunid(lun_id)
            if qos_id:
                self.remove_qos_lun(lun_id, qos_id)

            # Hypermetro members must have the pair torn down first.
            metadata = huawei_utils.get_volume_metadata(volume)
            if 'hypermetro_id' in metadata:
                hyperm = hypermetro.HuaweiHyperMetro(self.restclient, None,
                                                    self.configuration)
                try:
                    hyperm.delete_hypermetro(volume)
                except exception.VolumeBackendAPIException as err:
                    LOG.exception(_LE('Delete hypermetro error: %s.'), err)
                    # Best effort: still delete the local LUN before
                    # re-raising so it is not leaked on the array.
                    self.restclient.delete_lun(lun_id)
                    raise

            self.restclient.delete_lun(lun_id)
        else:
            LOG.warning(_LW("Can't find lun %s on the array."), lun_id)
            return False

    return True
def remove_qos_lun(self, lun_id, qos_id):
    """Detach a LUN from a QoS policy, deleting the policy if unused."""
    associated_luns = self.restclient.get_lun_list_in_qos(qos_id)
    if len(associated_luns) <= 1:
        # This LUN is the policy's only member: drop the whole policy.
        smartx.SmartQos(self.restclient).delete_qos(qos_id)
    else:
        # Other LUNs still use the policy; only detach this one.
        self.restclient.remove_lun_from_qos(lun_id,
                                            associated_luns,
                                            qos_id)
def _delete_lun_with_check(self, lun_id):
    """Delete a LUN if it exists, removing any QoS association first."""
    if not lun_id:
        return
    if not self.restclient.check_lun_exist(lun_id):
        return
    # A LUN bound to a QoS policy must be detached before deletion.
    qos_id = self.restclient.get_qosid_by_lunid(lun_id)
    if qos_id:
        self.remove_qos_lun(lun_id, qos_id)
    self.restclient.delete_lun(lun_id)
def _is_lun_migration_complete(self, src_id, dst_id):
    """Check whether the migration task src_id -> dst_id has finished.

    Returns True on a completed status, False while still running.
    Raises VolumeBackendAPIException when the task is faulted or when
    no matching task can be found at all.
    """
    result = self.restclient.get_lun_migration_task()
    task_seen = False
    for task in result.get('data', []):
        if task['PARENTID'] != src_id or task['TARGETLUNID'] != dst_id:
            continue
        task_seen = True
        status = task['RUNNINGSTATUS']
        if status == constants.MIGRATION_COMPLETE:
            return True
        if status == constants.MIGRATION_FAULT:
            err_msg = _('Lun migration error.')
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
    if not task_seen:
        err_msg = _("Cannot find migration task.")
        LOG.error(err_msg)
        raise exception.VolumeBackendAPIException(data=err_msg)
    return False
def _is_lun_migration_exist(self, src_id, dst_id):
    """Return True when a migration task src_id -> dst_id exists."""
    try:
        result = self.restclient.get_lun_migration_task()
    except Exception:
        # Treat a failed query as "no task"; callers use this for
        # best-effort cleanup decisions.
        LOG.error(_LE("Get LUN migration error."))
        return False
    return any(task['PARENTID'] == src_id
               and task['TARGETLUNID'] == dst_id
               for task in result.get('data', []))
def _migrate_lun(self, src_id, dst_id):
    """Migrate LUN src_id onto dst_id and wait for completion.

    Wraps any failure in VolumeBackendAPIException. Returns True on
    success.
    """
    try:
        self.restclient.create_lun_migration(src_id, dst_id)

        def _is_lun_migration_complete():
            return self._is_lun_migration_complete(src_id, dst_id)

        wait_interval = constants.MIGRATION_WAIT_INTERVAL
        huawei_utils.wait_for_condition(self.xml_file_path,
                                        _is_lun_migration_complete,
                                        wait_interval)
    # Clean up if migration failed.
    except Exception as ex:
        raise exception.VolumeBackendAPIException(data=ex)
    finally:
        # NOTE(review): this block also runs on success. The destination
        # LUN is only deleted when a matching migration task is still
        # present -- presumably a completed migration no longer reports
        # one; confirm against the array's migration semantics.
        if self._is_lun_migration_exist(src_id, dst_id):
            self.restclient.delete_lun_migration(src_id, dst_id)
            self._delete_lun_with_check(dst_id)

    LOG.debug("Migrate lun %s successfully.", src_id)
    return True
def _wait_volume_ready(self, lun_id):
    """Block until the LUN is healthy and in the ready running state."""
    wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
                                                   'LUNReadyWaitInterval')

    def _volume_ready():
        info = self.restclient.get_lun_info(lun_id)
        return (info['HEALTHSTATUS'] == constants.STATUS_HEALTH
                and info['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY)

    huawei_utils.wait_for_condition(self.xml_file_path,
                                    _volume_ready,
                                    wait_interval,
                                    wait_interval * 10)
def _get_original_status(self, volume):
if not volume['volume_attachment']:
return 'available'
else:
return 'in-use'
def update_migrated_volume(self, ctxt, volume, new_volume,
                           original_volume_status=None):
    """Rename the migrated LUN back to the original volume's name."""
    original_name = huawei_utils.encode_name(volume['id'])
    current_name = huawei_utils.encode_name(new_volume['id'])

    lun_id = self.restclient.get_volume_by_name(current_name)
    try:
        self.restclient.rename_lun(lun_id, original_name)
    except exception.VolumeBackendAPIException:
        # Rename failed: keep the volume pointing at the new name.
        LOG.error(_LE('Unable to rename lun %s on array.'), current_name)
        return {'_name_id': new_volume['_name_id'] or new_volume['id']}

    LOG.debug("Rename lun from %(current_name)s to %(original_name)s "
              "successfully.",
              {'current_name': current_name,
               'original_name': original_name})
    return {'_name_id': None}
def migrate_volume(self, ctxt, volume, host, new_type=None):
    """Migrate a volume within the same array.

    Delegates to _migrate_volume(); ctxt is not used by the backend.
    """
    moved = self._migrate_volume(volume, host, new_type)
    return moved
def _check_migration_valid(self, host, volume):
if 'pool_name' not in host['capabilities']:
return False
target_device = host['capabilities']['location_info']
# Source and destination should be on same array.
if target_device != self.restclient.device_id:
return False
# Same protocol should be used if volume is in-use.
protocol = huawei_utils.get_protocol(self.xml_file_path)
if (host['capabilities']['storage_protocol'] != protocol
and self._get_original_status(volume) == 'in-use'):
return False
pool_name = host['capabilities']['pool_name']
if len(pool_name) == 0:
return False
return True
def _migrate_volume(self, volume, host, new_type=None):
    """Create a LUN in the destination pool and migrate the data to it.

    Returns a (moved, model_update) tuple as expected by
    migrate_volume()/retype().
    """
    if not self._check_migration_valid(host, volume):
        return (False, None)

    type_id = volume['volume_type_id']

    volume_type = None
    if type_id:
        volume_type = volume_types.get_volume_type(None, type_id)

    pool_name = host['capabilities']['pool_name']
    pools = self.restclient.find_all_pools()
    pool_info = self.restclient.find_pool_info(pool_name, pools)
    src_volume_name = huawei_utils.encode_name(volume['id'])
    # Hash the source name to get a unique, length-limited name for the
    # temporary destination LUN.
    dst_volume_name = six.text_type(hash(src_volume_name))
    src_id = volume.get('provider_location')
    src_lun_params = self.restclient.get_lun_info(src_id)

    opts = None
    qos = None
    if new_type:
        # If new type exists, use new type.
        opts = huawei_utils._get_extra_spec_value(
            new_type['extra_specs'])
        opts = smartx.SmartX().get_smartx_specs_opts(opts)
        if 'LUNType' not in opts:
            opts['LUNType'] = huawei_utils.find_luntype_in_xml(
                self.xml_file_path)

        qos = huawei_utils.get_qos_by_volume_type(new_type)
    elif volume_type:
        qos = huawei_utils.get_qos_by_volume_type(volume_type)

    if not opts:
        # No new type: carry over the volume's current parameters.
        opts = huawei_utils.get_volume_params(volume)
        opts = smartx.SmartX().get_smartx_specs_opts(opts)

    lun_info = self._create_lun_with_extra_feature(pool_info,
                                                   dst_volume_name,
                                                   src_lun_params,
                                                   opts)
    lun_id = lun_info['ID']

    # Re-apply QoS / smartpartition / smartcache to the new LUN before
    # data is moved onto it.
    if qos:
        LOG.info(_LI('QoS: %s.'), qos)
        SmartQos = smartx.SmartQos(self.restclient)
        SmartQos.create_qos(qos, lun_id)
    if opts:
        smartpartition = smartx.SmartPartition(self.restclient)
        smartpartition.add(opts, lun_id)

        smartcache = smartx.SmartCache(self.restclient)
        smartcache.add(opts, lun_id)

    dst_id = lun_info['ID']
    self._wait_volume_ready(dst_id)
    moved = self._migrate_lun(src_id, dst_id)

    return moved, {}
def _create_lun_with_extra_feature(self, pool_info,
                                   lun_name,
                                   lun_params,
                                   spec_opts):
    """Create the migration target LUN, applying spec overrides.

    Most parameters are copied from the source LUN; allocation type and
    smarttier policy come from spec_opts when set there.
    """
    LOG.info(_LI('Create a new lun %s for migration.'), lun_name)

    # Prepare lun parameters.
    lunparam = {
        "TYPE": '11',
        "NAME": lun_name,
        "PARENTTYPE": '216',
        "PARENTID": pool_info['ID'],
        "CAPACITY": lun_params['CAPACITY'],
        "WRITEPOLICY": lun_params['WRITEPOLICY'],
        "MIRRORPOLICY": lun_params['MIRRORPOLICY'],
        "PREFETCHPOLICY": lun_params['PREFETCHPOLICY'],
        "PREFETCHVALUE": lun_params['PREFETCHVALUE'],
        "READCACHEPOLICY": lun_params['READCACHEPOLICY'],
        "WRITECACHEPOLICY": lun_params['WRITECACHEPOLICY'],
        "OWNINGCONTROLLER": lun_params['OWNINGCONTROLLER'],
        # Spec options override the source LUN's settings when present.
        "ALLOCTYPE": spec_opts.get('LUNType', lun_params['ALLOCTYPE']),
        "DATATRANSFERPOLICY": spec_opts['policy'] or '0',
    }
    return self.restclient.create_volume(lunparam)
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a volume from a snapshot.

    We use LUNcopy to copy a new volume from snapshot.
    The time needed increases as volume size does.

    Raises VolumeBackendAPIException when the snapshot cannot be found
    on the array.
    """
    snapshotname = huawei_utils.encode_name(snapshot['id'])

    # Resolve the snapshot id: prefer provider_location, fall back to a
    # name lookup on the array.
    snapshot_id = snapshot.get('provider_location')
    if snapshot_id is None:
        snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)
        if snapshot_id is None:
            err_msg = (_(
                'create_volume_from_snapshot: Snapshot %(name)s '
                'does not exist.')
                % {'name': snapshotname})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

    lun_info = self.create_volume(volume)

    tgt_lun_id = lun_info['ID']
    luncopy_name = huawei_utils.encode_name(volume['id'])
    LOG.info(_LI(
        'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
        'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'),
        {'src_lun_id': snapshot_id,
         'tgt_lun_id': tgt_lun_id,
         'copy_name': luncopy_name})

    # Wait for the new LUN to be ready before starting the LUNcopy.
    # (Replaces a previously inlined duplicate of _wait_volume_ready,
    # which used the same event type, interval and timeout.)
    self._wait_volume_ready(tgt_lun_id)

    self._copy_volume(volume, luncopy_name, snapshot_id, tgt_lun_id)

    return {'ID': lun_info['ID'],
            'lun_info': lun_info}
def create_cloned_volume(self, volume, src_vref):
    """Clone a new volume from an existing volume.

    Implemented as snapshot + copy-from-snapshot: a temporary snapshot
    of the source is created, copied onto the new volume, then deleted.
    """
    # Form the snapshot structure.
    snapshot = {'id': uuid.uuid4().__str__(),
                'volume_id': src_vref['id'],
                'volume': src_vref}

    # Create snapshot.
    self.create_snapshot(snapshot)

    try:
        # Create volume from snapshot.
        lun_info = self.create_volume_from_snapshot(volume, snapshot)
    finally:
        try:
            # Delete the temporary snapshot whether or not the copy
            # succeeded; a failure here is logged, not raised, so it
            # cannot mask the copy's outcome.
            self.delete_snapshot(snapshot)
        except exception.VolumeBackendAPIException:
            LOG.warning(_LW(
                'Failure deleting the snapshot %(snapshot_id)s '
                'of volume %(volume_id)s.'),
                {'snapshot_id': snapshot['id'],
                 'volume_id': src_vref['id']},)

    return {'provider_location': lun_info['ID'],
            'lun_info': lun_info}
@utils.synchronized('huawei', external=True)
def extend_volume(self, volume, new_size):
    """Extend a volume to new_size (GB)."""
    volume_size = huawei_utils.get_volume_size(volume)
    # The array expects the size in 512-byte sectors. Use floor
    # division so the result stays an int under Python 3 as well;
    # plain '/' would yield a float there (identical under Python 2).
    new_volume_size = int(new_size) * units.Gi // 512
    volume_name = huawei_utils.encode_name(volume['id'])

    LOG.info(_LI(
        'Extend volume: %(volumename)s, oldsize:'
        ' %(oldsize)s newsize: %(newsize)s.'),
        {'volumename': volume_name,
         'oldsize': volume_size,
         'newsize': new_volume_size},)

    lun_id = self.restclient.get_lunid(volume, volume_name)
    luninfo = self.restclient.extend_volume(lun_id, new_volume_size)

    return {'provider_location': luninfo['ID'],
            'lun_info': luninfo}
@utils.synchronized('huawei', external=True)
def create_snapshot(self, snapshot):
    """Create a snapshot on the array and activate it."""
    snapshot_info = self.restclient.create_snapshot(snapshot)
    snapshot_id = snapshot_info['ID']
    # A snapshot must be activated before it can serve as a copy source.
    self.restclient.activate_snapshot(snapshot_id)
    return {'provider_location': snapshot_id,
            'lun_info': snapshot_info}
@utils.synchronized('huawei', external=True)
def delete_snapshot(self, snapshot):
    """Delete a snapshot from the array.

    The snapshot id comes from provider_location, with a fall-back name
    lookup. Returns False when no id can be resolved, True otherwise
    (including the case where the snapshot is already gone).
    """
    snapshotname = huawei_utils.encode_name(snapshot['id'])
    volume_name = huawei_utils.encode_name(snapshot['volume_id'])

    LOG.info(_LI(
        'stop_snapshot: snapshot name: %(snapshot)s, '
        'volume name: %(volume)s.'),
        {'snapshot': snapshotname,
         'volume': volume_name},)

    snapshot_id = snapshot.get('provider_location')
    if snapshot_id is None:
        snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)

    if snapshot_id is not None:
        if self.restclient.check_snapshot_exist(snapshot_id):
            # A snapshot must be deactivated (stopped) before deletion.
            self.restclient.stop_snapshot(snapshot_id)
            self.restclient.delete_snapshot(snapshot_id)
        else:
            # Id resolved but snapshot already gone: treated as success.
            LOG.warning(_LW("Can't find snapshot on the array."))
    else:
        LOG.warning(_LW("Can't find snapshot on the array."))
        return False

    return True
def retype(self, ctxt, volume, new_type, diff, host):
    """Convert the volume to be of the new type."""
    LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, "
              "diff=%(diff)s, host=%(host)s.", {'id': volume['id'],
                                                'new_type': new_type,
                                                'diff': diff,
                                                'host': host})

    # Work out whether a migration is required or the LUN can be
    # modified in place.
    migration, change_opts, lun_id = self.determine_changes_when_retype(
        volume, new_type, host)

    try:
        if not migration:
            # In-place change: apply smartx/QoS adjustments directly.
            self.modify_lun(lun_id, change_opts)
            return True

        LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with "
                  "change %(change_opts)s.",
                  {"lun_id": lun_id, "change_opts": change_opts})
        if self._migrate_volume(volume, host, new_type):
            return True
        LOG.warning(_LW("Storage-assisted migration failed during "
                        "retype."))
        return False
    except exception.VolumeBackendAPIException:
        LOG.exception(_LE('Retype volume error.'))
        return False
def modify_lun(self, lun_id, change_opts):
    """Apply in-place retype changes to a LUN.

    change_opts carries (old, new) pairs under the keys 'partitionid',
    'cacheid', 'policy' and 'qos' (as built by
    determine_changes_when_retype); each entry that is set is applied
    in turn.
    """
    # Smartpartition: move the LUN between cache partitions.
    if change_opts.get('partitionid'):
        old, new = change_opts['partitionid']
        old_id = old[0]
        old_name = old[1]
        new_id = new[0]
        new_name = new[1]
        if old_id:
            self.restclient.remove_lun_from_partition(lun_id, old_id)
        if new_id:
            self.restclient.add_lun_to_partition(lun_id, new_id)
        LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from "
                     "(name: %(old_name)s, id: %(old_id)s) to "
                     "(name: %(new_name)s, id: %(new_id)s) success."),
                 {"lun_id": lun_id,
                  "old_id": old_id, "old_name": old_name,
                  "new_id": new_id, "new_name": new_name})

    # Smartcache: move the LUN between SSD cache partitions.
    if change_opts.get('cacheid'):
        old, new = change_opts['cacheid']
        old_id = old[0]
        old_name = old[1]
        new_id = new[0]
        new_name = new[1]
        if old_id:
            self.restclient.remove_lun_from_cache(lun_id, old_id)
        if new_id:
            self.restclient.add_lun_to_cache(lun_id, new_id)
        LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from "
                     "(name: %(old_name)s, id: %(old_id)s) to "
                     "(name: %(new_name)s, id: %(new_id)s) successfully."),
                 {'lun_id': lun_id,
                  'old_id': old_id, "old_name": old_name,
                  'new_id': new_id, "new_name": new_name})

    # Smarttier: switch the data-transfer policy.
    if change_opts.get('policy'):
        old_policy, new_policy = change_opts['policy']
        self.restclient.change_lun_smarttier(lun_id, new_policy)
        LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from "
                     "%(old_policy)s to %(new_policy)s success."),
                 {'lun_id': lun_id,
                  'old_policy': old_policy,
                  'new_policy': new_policy})

    # SmartQos: detach from the old policy, then attach the new one.
    if change_opts.get('qos'):
        old_qos, new_qos = change_opts['qos']
        old_qos_id = old_qos[0]
        old_qos_value = old_qos[1]
        if old_qos_id:
            self.remove_qos_lun(lun_id, old_qos_id)
        if new_qos:
            smart_qos = smartx.SmartQos(self.restclient)
            smart_qos.create_qos(new_qos, lun_id)
        LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from "
                     "%(old_qos_value)s to %(new_qos)s success."),
                 {'lun_id': lun_id,
                  'old_qos_value': old_qos_value,
                  'new_qos': new_qos})
def get_lun_specs(self, lun_id):
    """Read the smartx-related settings currently applied to a LUN.

    Returns a dict with 'policy', 'partitionid', 'cacheid' (None when
    the array reports an empty value) and the integer 'LUNType'.
    """
    lun_info = self.restclient.get_lun_info(lun_id)
    return {
        'LUNType': int(lun_info['ALLOCTYPE']),
        # Empty array values are normalized to None.
        'policy': lun_info['DATATRANSFERPOLICY'] or None,
        'cacheid': lun_info['SMARTCACHEPARTITIONID'] or None,
        'partitionid': lun_info['CACHEPARTITIONID'] or None,
    }
def determine_changes_when_retype(self, volume, new_type, host):
    """Compare current LUN settings with the new type's requirements.

    Returns a (migration, change_opts, lun_id) tuple. 'migration' is
    True when the change needs a storage-assisted migration (backend
    host or LUN allocation type differs); otherwise 'change_opts'
    describes the in-place modifications for modify_lun().
    """
    migration = False
    change_opts = {
        'policy': None,
        'partitionid': None,
        'cacheid': None,
        'qos': None,
        'host': None,
        'LUNType': None,
    }

    lun_id = volume.get('provider_location')
    old_opts = self.get_lun_specs(lun_id)

    new_specs = new_type['extra_specs']
    new_opts = huawei_utils._get_extra_spec_value(new_specs)
    new_opts = smartx.SmartX().get_smartx_specs_opts(new_opts)

    if 'LUNType' not in new_opts:
        new_opts['LUNType'] = huawei_utils.find_luntype_in_xml(
            self.xml_file_path)

    # Host or allocation-type changes cannot be applied in place and
    # force a migration.
    if volume['host'] != host['host']:
        migration = True
        change_opts['host'] = (volume['host'], host['host'])
    if old_opts['LUNType'] != new_opts['LUNType']:
        migration = True
        change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType'])

    # Resolve requested smartcache/smartpartition names to array ids up
    # front so a bad name fails the retype before anything is changed.
    new_cache_id = None
    new_cache_name = new_opts['cachename']
    if new_cache_name:
        new_cache_id = self.restclient.get_cache_id_by_name(new_cache_name)
        if new_cache_id is None:
            msg = (_(
                "Can't find cache name on the array, cache name is: "
                "%(name)s.") % {'name': new_cache_name})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    new_partition_id = None
    new_partition_name = new_opts['partitionname']
    if new_partition_name:
        new_partition_id = self.restclient.get_partition_id_by_name(
            new_partition_name)
        if new_partition_id is None:
            msg = (_(
                "Can't find partition name on the array, partition name "
                "is: %(name)s.") % {'name': new_partition_name})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    # smarttier
    if old_opts['policy'] != new_opts['policy']:
        change_opts['policy'] = (old_opts['policy'], new_opts['policy'])

    # smartcache
    old_cache_id = old_opts['cacheid']
    if old_cache_id != new_cache_id:
        old_cache_name = None
        if old_cache_id:
            cache_info = self.restclient.get_cache_info_by_id(old_cache_id)
            old_cache_name = cache_info['NAME']
        change_opts['cacheid'] = ([old_cache_id, old_cache_name],
                                  [new_cache_id, new_cache_name])

    # smartpartition
    old_partition_id = old_opts['partitionid']
    if old_partition_id != new_partition_id:
        old_partition_name = None
        if old_partition_id:
            partition_info = self.restclient.get_partition_info_by_id(
                old_partition_id)
            old_partition_name = partition_info['NAME']
        change_opts['partitionid'] = ([old_partition_id,
                                       old_partition_name],
                                      [new_partition_id,
                                       new_partition_name])

    # smartqos
    new_qos = huawei_utils.get_qos_by_volume_type(new_type)
    old_qos_id = self.restclient.get_qosid_by_lunid(lun_id)
    old_qos = self._get_qos_specs_from_array(old_qos_id)
    if old_qos != new_qos:
        change_opts['qos'] = ([old_qos_id, old_qos], new_qos)

    LOG.debug("Determine changes when retype. Migration: "
              "%(migration)s, change_opts: %(change_opts)s.",
              {'migration': migration, 'change_opts': change_opts})
    return migration, change_opts, lun_id
def _get_qos_specs_from_array(self, qos_id):
qos = {}
qos_info = {}
if qos_id:
qos_info = self.restclient.get_qos_info(qos_id)
for key, value in qos_info.items():
if key.upper() in constants.QOS_KEYS:
if key.upper() == 'LATENCY' and value == '0':
continue
else:
qos[key.upper()] = value
return qos
def create_export(self, context, volume, connector):
    """Export a volume."""
    # Nothing to do: mapping happens in initialize_connection().
def ensure_export(self, context, volume):
    """Synchronously recreate an export for a volume."""
    # Nothing to do: exports are not tracked outside the mapping view.
def remove_export(self, context, volume):
    """Remove an export for a volume."""
    # Nothing to do: unmapping happens in terminate_connection().
def _copy_volume(self, volume, copy_name, src_lun, tgt_lun):
    """Run a LUNcopy from src_lun to tgt_lun and wait for completion.

    On failure the LUNcopy object and the target volume are removed and
    the original exception is re-raised; on success only the LUNcopy
    object is deleted.
    """
    luncopy_id = self.restclient.create_luncopy(copy_name,
                                                src_lun, tgt_lun)
    event_type = 'LUNcopyWaitInterval'
    wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
                                                   event_type)

    try:
        self.restclient.start_luncopy(luncopy_id)

        def _luncopy_complete():
            luncopy_info = self.restclient.get_luncopy_info(luncopy_id)
            if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY:
                # luncopy_info['status'] means for the running status of
                # the luncopy. If luncopy_info['status'] is equal to '40',
                # this luncopy is completely ready.
                return True
            elif luncopy_info['state'] != constants.STATUS_HEALTH:
                # luncopy_info['state'] means for the healthy status of the
                # luncopy. If luncopy_info['state'] is not equal to '1',
                # this means that an error occurred during the LUNcopy
                # operation and we should abort it.
                err_msg = (_(
                    'An error occurred during the LUNcopy operation. '
                    'LUNcopy name: %(luncopyname)s. '
                    'LUNcopy status: %(luncopystatus)s. '
                    'LUNcopy state: %(luncopystate)s.')
                    % {'luncopyname': luncopy_id,
                       'luncopystatus': luncopy_info['status'],
                       'luncopystate': luncopy_info['state']},)
                LOG.error(err_msg)
                raise exception.VolumeBackendAPIException(data=err_msg)
        huawei_utils.wait_for_condition(self.xml_file_path,
                                        _luncopy_complete,
                                        wait_interval)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Clean up both the copy task and the half-copied target.
            self.restclient.delete_luncopy(luncopy_id)
            self.delete_volume(volume)

    self.restclient.delete_luncopy(luncopy_id)
class Huawei18000ISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
    """ISCSI driver for Huawei OceanStor 18000 storage arrays.

    Version history:
        1.0.0 - Initial driver
        1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
        1.1.1 - Code refactor
                CHAP support
                Multiple pools support
                ISCSI multipath support
                SmartX support
                Volume migration support
                Volume retype support
    """

    VERSION = "1.1.1"

    def __init__(self, *args, **kwargs):
        super(Huawei18000ISCSIDriver, self).__init__(*args, **kwargs)

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        NOTE(review): the 'refresh' argument is not forwarded -- the
        base driver is always called with refresh=False; confirm
        whether that is intentional.
        """
        data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['storage_protocol'] = 'iSCSI'
        data['driver_version'] = self.VERSION
        data['vendor_name'] = 'Huawei'
        return data

    @utils.synchronized('huawei', external=True)
    def initialize_connection(self, volume, connector):
        """Map a volume to a host and return target iSCSI information."""
        LOG.info(_LI('Enter initialize_connection.'))
        initiator_name = connector['initiator']
        volume_name = huawei_utils.encode_name(volume['id'])

        LOG.info(_LI(
            'initiator name: %(initiator_name)s, '
            'volume name: %(volume)s.'),
            {'initiator_name': initiator_name,
             'volume': volume_name})

        (iscsi_iqns,
         target_ips,
         portgroup_id) = self.restclient.get_iscsi_params(self.xml_file_path,
                                                          connector)
        LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
                     'target_ip: %(target_ip)s, '
                     'portgroup_id: %(portgroup_id)s.'),
                 {'iscsi_iqn': iscsi_iqns,
                  'target_ip': target_ips,
                  'portgroup_id': portgroup_id},)

        # Create hostgroup if not exist.
        host_name = connector['host']
        # Host names beyond the array limit are replaced by their hash;
        # the original name is passed along for bookkeeping.
        host_name_before_hash = None
        if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
            host_name_before_hash = host_name
            host_name = six.text_type(hash(host_name))
        host_id = self.restclient.add_host_with_check(host_name,
                                                      host_name_before_hash)

        # Add initiator to the host.
        self.restclient.ensure_initiator_added(self.xml_file_path,
                                               initiator_name,
                                               host_id)
        hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)

        lun_id = self.restclient.get_lunid(volume, volume_name)

        # Mapping lungroup and hostgroup to view.
        self.restclient.do_mapping(lun_id, hostgroup_id,
                                   host_id, portgroup_id)

        hostlun_id = self.restclient.find_host_lun_id(host_id, lun_id)

        LOG.info(_LI("initialize_connection, host lun id is: %s."),
                 hostlun_id)

        iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
        chapinfo = self.restclient.find_chap_info(iscsi_conf,
                                                  initiator_name)
        # Return iSCSI properties.
        properties = {}
        properties['target_discovered'] = False
        properties['volume_id'] = volume['id']
        multipath = connector.get('multipath', False)
        hostlun_id = int(hostlun_id)
        if not multipath:
            properties['target_portal'] = ('%s:3260' % target_ips[0])
            properties['target_iqn'] = iscsi_iqns[0]
            properties['target_lun'] = hostlun_id
        else:
            # Multipath: expose every portal, each with the same host
            # LUN id.
            properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
            properties['target_portals'] = [
                '%s:3260' % ip for ip in target_ips]
            properties['target_luns'] = [hostlun_id] * len(target_ips)

        # If use CHAP, return CHAP info.
        if chapinfo:
            chap_username, chap_password = chapinfo.split(';')
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = chap_username
            properties['auth_password'] = chap_password

        LOG.info(_LI("initialize_connection success. Return data: %s."),
                 properties)
        return {'driver_volume_type': 'iscsi', 'data': properties}

    @utils.synchronized('huawei', external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Delete map between a volume and a host.

        The LUN is removed from its lungroup; when it was the last LUN
        in the mapping the whole view (CHAP, lungroup, initiator,
        hostgroup, host, mapping view) is torn down as well.
        """
        initiator_name = connector['initiator']
        volume_name = huawei_utils.encode_name(volume['id'])
        lun_id = volume.get('provider_location')
        host_name = connector['host']
        lungroup_id = None

        LOG.info(_LI(
            'terminate_connection: volume name: %(volume)s, '
            'initiator name: %(ini)s, '
            'lun_id: %(lunid)s.'),
            {'volume': volume_name,
             'ini': initiator_name,
             'lunid': lun_id},)

        iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
        portgroup = None
        portgroup_id = None
        view_id = None
        left_lunnum = -1
        # Look up the target port group configured for this initiator.
        for ini in iscsi_conf['Initiator']:
            if ini['Name'] == initiator_name:
                for key in ini:
                    if key == 'TargetPortGroup':
                        portgroup = ini['TargetPortGroup']
                        break

        if portgroup:
            portgroup_id = self.restclient.find_tgt_port_group(portgroup)
        # Must hash the same way initialize_connection did.
        if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
            host_name = six.text_type(hash(host_name))
        host_id = self.restclient.find_host(host_name)
        if host_id:
            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
            view_id = self.restclient.find_mapping_view(mapping_view_name)
            if view_id:
                lungroup_id = self.restclient.find_lungroup_from_map(view_id)

        # Remove lun from lungroup.
        if lun_id and self.restclient.check_lun_exist(lun_id):
            if lungroup_id:
                lungroup_ids = self.restclient.get_lungroupids_by_lunid(lun_id)
                if lungroup_id in lungroup_ids:
                    self.restclient.remove_lun_from_lungroup(lungroup_id,
                                                             lun_id)
                else:
                    LOG.warning(_LW("Lun is not in lungroup. "
                                    "Lun id: %(lun_id)s. "
                                    "lungroup id: %(lungroup_id)s."),
                                {"lun_id": lun_id,
                                 "lungroup_id": lungroup_id})
        else:
            LOG.warning(_LW("Can't find lun on the array."))

        # Remove portgroup from mapping view if no lun left in lungroup.
        if lungroup_id:
            left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)

        if portgroup_id and view_id and (int(left_lunnum) <= 0):
            if self.restclient.is_portgroup_associated_to_view(view_id,
                                                               portgroup_id):
                self.restclient.delete_portgroup_mapping_view(view_id,
                                                              portgroup_id)

        # Last LUN unmapped: tear down the whole mapping chain.
        if view_id and (int(left_lunnum) <= 0):
            self.restclient.remove_chap(initiator_name)

            if self.restclient.lungroup_associated(view_id, lungroup_id):
                self.restclient.delete_lungroup_mapping_view(view_id,
                                                             lungroup_id)
            self.restclient.delete_lungroup(lungroup_id)
            if self.restclient.is_initiator_associated_to_host(initiator_name):
                self.restclient.remove_iscsi_from_host(initiator_name)
            hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
            hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
            if hostgroup_id:
                if self.restclient.hostgroup_associated(view_id, hostgroup_id):
                    self.restclient.delete_hostgoup_mapping_view(view_id,
                                                                 hostgroup_id)
                self.restclient.remove_host_from_hostgroup(hostgroup_id,
                                                           host_id)
                self.restclient.delete_hostgroup(hostgroup_id)
            self.restclient.remove_host(host_id)
            self.restclient.delete_mapping_view(view_id)
class Huawei18000FCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
"""FC driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
Multiple pools support
SmartX support
Volume migration support
Volume retype support
FC zone enhancement
Volume hypermetro support
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
    super(Huawei18000FCDriver, self).__init__(*args, **kwargs)
    # FC SAN lookup service (fabric zone manager); created lazily by
    # initialize_connection/terminate_connection when configured.
    self.fcsan_lookup_service = None
def get_volume_stats(self, refresh=False):
    """Get volume status.

    NOTE(review): 'refresh' is not forwarded -- the base driver is
    always asked with refresh=False; confirm whether intentional.
    """
    data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
    backend = self.configuration.safe_get('volume_backend_name')
    data.update({
        'volume_backend_name': backend or self.__class__.__name__,
        'storage_protocol': 'FC',
        'driver_version': self.VERSION,
        'vendor_name': 'Huawei',
    })
    return data
@utils.synchronized('huawei', external=True)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
    """Map the volume over FC and return connection properties.

    Handles both fabric (zone-manager) and direct-attach setups, and
    extends the mapping to the remote array for hypermetro volumes.
    """
    wwns = connector['wwpns']
    volume_name = huawei_utils.encode_name(volume['id'])

    LOG.info(_LI(
        'initialize_connection, initiator: %(wwpns)s,'
        ' volume name: %(volume)s.'),
        {'wwpns': wwns,
         'volume': volume_name},)

    lun_id = self.restclient.get_lunid(volume, volume_name)

    # Host names beyond the array limit are replaced by their hash.
    host_name_before_hash = None
    host_name = connector['host']
    if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
        host_name_before_hash = host_name
        host_name = six.text_type(hash(host_name))

    if not self.fcsan_lookup_service:
        self.fcsan_lookup_service = fczm_utils.create_lookup_service()

    if self.fcsan_lookup_service:
        # Use FC switch.
        host_id = self.restclient.add_host_with_check(
            host_name, host_name_before_hash)
        zone_helper = fc_zone_helper.FCZoneHelper(
            self.fcsan_lookup_service, self.restclient)
        (tgt_port_wwns, init_targ_map) = (
            zone_helper.build_ini_targ_map(wwns))
        for ini in init_targ_map:
            self.restclient.ensure_fc_initiator_added(ini, host_id)
    else:
        # Not use FC switch.
        host_id = self.restclient.add_host_with_check(
            host_name, host_name_before_hash)
        online_wwns_in_host = (
            self.restclient.get_host_online_fc_initiators(host_id))
        online_free_wwns = self.restclient.get_online_free_wwns()
        for wwn in wwns:
            # Each initiator must either already belong to this host or
            # be free to add; otherwise the mapping cannot be built.
            if (wwn not in online_wwns_in_host
                    and wwn not in online_free_wwns):
                wwns_in_host = (
                    self.restclient.get_host_fc_initiators(host_id))
                iqns_in_host = (
                    self.restclient.get_host_iscsi_initiators(host_id))
                if not wwns_in_host and not iqns_in_host:
                    # Roll back the freshly created, still-empty host.
                    self.restclient.remove_host(host_id)

                msg = (_('Can not add FC initiator to host.'))
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        for wwn in wwns:
            if wwn in online_free_wwns:
                self.restclient.add_fc_port_to_host(host_id, wwn)

        (tgt_port_wwns, init_targ_map) = (
            self.restclient.get_init_targ_map(wwns))

    # Add host into hostgroup.
    hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
    map_info = self.restclient.do_mapping(lun_id,
                                          hostgroup_id,
                                          host_id)
    host_lun_id = self.restclient.find_host_lun_id(host_id, lun_id)

    # Return FC properties.
    fc_info = {'driver_volume_type': 'fibre_channel',
               'data': {'target_lun': int(host_lun_id),
                        'target_discovered': True,
                        'target_wwn': tgt_port_wwns,
                        'volume_id': volume['id'],
                        'initiator_target_map': init_targ_map,
                        'map_info': map_info}, }

    loc_tgt_wwn = fc_info['data']['target_wwn']
    local_ini_tgt_map = fc_info['data']['initiator_target_map']

    # Deal with hypermetro connection.
    metadata = huawei_utils.get_volume_metadata(volume)
    LOG.info(_LI("initialize_connection, metadata is: %s."), metadata)
    if 'hypermetro_id' in metadata:
        hyperm = hypermetro.HuaweiHyperMetro(self.restclient, None,
                                             self.configuration)
        rmt_fc_info = hyperm.connect_volume_fc(volume, connector)

        rmt_tgt_wwn = rmt_fc_info['data']['target_wwn']
        rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map']
        # Merge local and remote targets so the host multipaths across
        # both arrays of the hypermetro pair.
        fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn)
        wwns = connector['wwpns']
        for wwn in wwns:
            if (wwn in local_ini_tgt_map
                    and wwn in rmt_ini_tgt_map):
                fc_info['data']['initiator_target_map'][wwn].extend(
                    rmt_ini_tgt_map[wwn])

            elif (wwn not in local_ini_tgt_map
                    and wwn in rmt_ini_tgt_map):
                fc_info['data']['initiator_target_map'][wwn] = (
                    rmt_ini_tgt_map[wwn])
            # else, do nothing

        loc_map_info = fc_info['data']['map_info']
        rmt_map_info = rmt_fc_info['data']['map_info']
        # Both arrays must expose the LUN under the same host LUN id.
        same_host_id = self._get_same_hostid(loc_map_info,
                                             rmt_map_info)

        self.restclient.change_hostlun_id(loc_map_info, same_host_id)
        hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id)

        fc_info['data']['target_lun'] = same_host_id
        hyperm.rmt_client.logout()

    LOG.info(_LI("Return FC info is: %s."), fc_info)
    return fc_info
def _get_same_hostid(self, loc_fc_info, rmt_fc_info):
    """Pick a host LUN id that is free on both arrays of the pair.

    Raises VolumeBackendAPIException when no common id exists.
    """
    loc_aval_luns = json.loads(loc_fc_info['aval_luns'])
    rmt_aval_luns = json.loads(rmt_fc_info['aval_luns'])

    same_host_id = None
    # Candidate ids 1..511 (0 is never used, matching the original).
    for candidate in range(1, 512):
        if candidate in rmt_aval_luns and candidate in loc_aval_luns:
            same_host_id = candidate
            break

    LOG.info(_LI("The same hostid is: %s."), same_host_id)
    if not same_host_id:
        msg = _("Can't find the same host id from arrays.")
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    return same_host_id
    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Delete map between a volume and a host."""
        wwns = connector['wwpns']
        volume_name = huawei_utils.encode_name(volume['id'])
        lun_id = volume.get('provider_location')
        host_name = connector['host']
        # -1 means "unknown"; stays negative when no lungroup is found so the
        # full teardown branch below is taken.
        left_lunnum = -1
        lungroup_id = None
        view_id = None
        LOG.info(_LI('terminate_connection: volume name: %(volume)s, '
                     'wwpns: %(wwns)s, '
                     'lun_id: %(lunid)s.'),
                 {'volume': volume_name,
                  'wwns': wwns,
                  'lunid': lun_id},)
        # Long host names are replaced by a hashed surrogate; this must match
        # the naming used when the host object was created at attach time.
        if host_name and len(host_name) > constants.MAX_HOSTNAME_LENGTH:
            host_name = six.text_type(hash(host_name))
        host_id = self.restclient.find_host(host_name)
        # Resolve the mapping view and its lungroup from the host, if any.
        if host_id:
            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
            view_id = self.restclient.find_mapping_view(mapping_view_name)
            if view_id:
                lungroup_id = self.restclient.find_lungroup_from_map(view_id)
        # Step 1: take this LUN out of the lungroup (if both still exist).
        if lun_id and self.restclient.check_lun_exist(lun_id):
            if lungroup_id:
                lungroup_ids = self.restclient.get_lungroupids_by_lunid(lun_id)
                if lungroup_id in lungroup_ids:
                    self.restclient.remove_lun_from_lungroup(lungroup_id,
                                                             lun_id)
                else:
                    LOG.warning(_LW("Lun is not in lungroup. "
                                    "Lun id: %(lun_id)s. "
                                    "Lungroup id: %(lungroup_id)s."),
                                {"lun_id": lun_id,
                                 "lungroup_id": lungroup_id})
        else:
            LOG.warning(_LW("Can't find lun on the array."))
        # Step 2: if other LUNs remain mapped to this host, keep the mapping
        # infrastructure (view/lungroup/host) and return an empty FC payload.
        if lungroup_id:
            left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
        if int(left_lunnum) > 0:
            fc_info = {'driver_volume_type': 'fibre_channel',
                       'data': {}}
        else:
            # Last LUN for this host: dismantle the whole mapping and report
            # the target WWNs / initiator-target map so the FC zone manager
            # (via @RemoveFCZone) can tear the zone down.
            if not self.fcsan_lookup_service:
                self.fcsan_lookup_service = fczm_utils.create_lookup_service()
            if self.fcsan_lookup_service:
                zone_helper = fc_zone_helper.FCZoneHelper(
                    self.fcsan_lookup_service, self.restclient)
                (tgt_port_wwns, init_targ_map) = (
                    zone_helper.build_ini_targ_map(wwns))
            else:
                (tgt_port_wwns, init_targ_map) = (
                    self.restclient.get_init_targ_map(wwns))
            # Remove the FC initiators from the host object.
            for wwn in wwns:
                if self.restclient.is_fc_initiator_associated_to_host(wwn):
                    self.restclient.remove_fc_from_host(wwn)
            # Unlink and delete the lungroup.
            if lungroup_id:
                if view_id and self.restclient.lungroup_associated(
                        view_id, lungroup_id):
                    self.restclient.delete_lungroup_mapping_view(view_id,
                                                                 lungroup_id)
                self.restclient.delete_lungroup(lungroup_id)
            # Unlink and delete the hostgroup, then the (now empty) host.
            if host_id:
                hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
                hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
                if hostgroup_id:
                    if view_id and self.restclient.hostgroup_associated(
                            view_id, hostgroup_id):
                        # NOTE: "hostgoup" typo is in the restclient method
                        # name itself; do not "fix" it here.
                        self.restclient.delete_hostgoup_mapping_view(
                            view_id, hostgroup_id)
                    self.restclient.remove_host_from_hostgroup(
                        hostgroup_id, host_id)
                    self.restclient.delete_hostgroup(hostgroup_id)
                if not self.restclient.check_fc_initiators_exist_in_host(
                        host_id):
                    self.restclient.remove_host(host_id)
            if view_id:
                self.restclient.delete_mapping_view(view_id)
            fc_info = {'driver_volume_type': 'fibre_channel',
                       'data': {'target_wwn': tgt_port_wwns,
                                'initiator_target_map': init_targ_map}}
        # Deal with hypermetro connection.
        metadata = huawei_utils.get_volume_metadata(volume)
        LOG.info(_LI("Detach Volume, metadata is: %s."), metadata)
        if 'hypermetro_id' in metadata:
            hyperm = hypermetro.HuaweiHyperMetro(self.restclient, None,
                                                 self.configuration)
            hyperm.disconnect_volume_fc(volume, connector)
        LOG.info(_LI("terminate_connection, return data is: %s."),
                 fc_info)
        return fc_info
| apache-2.0 |
andrey-malets/web-page-replay | third_party/dns/set.py | 248 | 7843 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A simple Set class."""
class Set(object):
    """A simple set class.

    Sets are not in Python until 2.3, and rdata are not immutable so
    we cannot use sets.Set anyway. This class implements a subset of
    the 2.3 Set interface using a list as the container.

    @ivar items: A list of the items which are in the set
    @type items: list"""

    __slots__ = ['items']

    def __init__(self, items=None):
        """Initialize the set.

        @param items: the initial set of items
        @type items: any iterable or None
        """
        self.items = []
        # Idiomatic identity test (was "not items is None").
        if items is not None:
            for item in items:
                self.add(item)

    def __repr__(self):
        return "dns.simpleset.Set(%s)" % repr(self.items)

    def add(self, item):
        """Add an item to the set."""
        # "x not in y" instead of "not x in y" (PEP 8).
        if item not in self.items:
            self.items.append(item)

    def remove(self, item):
        """Remove an item from the set; raises ValueError if absent."""
        self.items.remove(item)

    def discard(self, item):
        """Remove an item from the set if present."""
        try:
            self.items.remove(item)
        except ValueError:
            pass

    def _clone(self):
        """Make a (shallow) copy of the set.

        There is a 'clone protocol' that subclasses of this class
        should use. To make a copy, first call your super's _clone()
        method, and use the object returned as the new instance. Then
        make shallow copies of the attributes defined in the subclass.

        This protocol allows us to write the set algorithms that
        return new instances (e.g. union) once, and keep using them in
        subclasses.
        """
        cls = self.__class__
        obj = cls.__new__(cls)
        obj.items = list(self.items)
        return obj

    def __copy__(self):
        """Make a (shallow) copy of the set."""
        return self._clone()

    def copy(self):
        """Make a (shallow) copy of the set."""
        return self._clone()

    def union_update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.
        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        for item in other.items:
            self.add(item)

    def intersection_update(self, other):
        """Update the set, removing any elements from other which are not
        in both sets.
        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        # we make a copy of the list so that we can remove items from
        # the list without breaking the iterator.
        for item in list(self.items):
            if item not in other.items:
                self.items.remove(item)

    def difference_update(self, other):
        """Update the set, removing any elements from other which are in
        the set.
        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            self.items = []
        else:
            for item in other.items:
                self.discard(item)

    def union(self, other):
        """Return a new set which is the union of I{self} and I{other}.
        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """
        obj = self._clone()
        obj.union_update(other)
        return obj

    def intersection(self, other):
        """Return a new set which is the intersection of I{self} and I{other}.
        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """
        obj = self._clone()
        obj.intersection_update(other)
        return obj

    def difference(self, other):
        """Return a new set which I{self} - I{other}, i.e. the items
        in I{self} which are not also in I{other}.
        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """
        obj = self._clone()
        obj.difference_update(other)
        return obj

    def __or__(self, other):
        return self.union(other)

    def __and__(self, other):
        return self.intersection(other)

    def __add__(self, other):
        return self.union(other)

    def __sub__(self, other):
        return self.difference(other)

    def __ior__(self, other):
        self.union_update(other)
        return self

    def __iand__(self, other):
        self.intersection_update(other)
        return self

    def __iadd__(self, other):
        self.union_update(other)
        return self

    def __isub__(self, other):
        self.difference_update(other)
        return self

    def update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.
        @param other: the collection of items with which to update the set
        @type other: any iterable type"""
        for item in other:
            self.add(item)

    def clear(self):
        """Make the set empty."""
        self.items = []

    def __eq__(self, other):
        # Yes, this is inefficient but the sets we're dealing with are
        # usually quite small, so it shouldn't hurt too much.
        for item in self.items:
            if item not in other.items:
                return False
        for item in other.items:
            if item not in self.items:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)

    def __getitem__(self, i):
        return self.items[i]

    def __delitem__(self, i):
        del self.items[i]

    # __getslice__/__delslice__ are Python 2 slice hooks; kept for
    # backward compatibility with the original interface.
    def __getslice__(self, i, j):
        return self.items[i:j]

    def __delslice__(self, i, j):
        del self.items[i:j]

    def issubset(self, other):
        """Is I{self} a subset of I{other}?
        @rtype: bool
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in self.items:
            if item not in other.items:
                return False
        return True

    def issuperset(self, other):
        """Is I{self} a superset of I{other}?
        @rtype: bool
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in other.items:
            if item not in self.items:
                return False
        return True
| apache-2.0 |
Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/userprefs/oid.py | 2 | 8766 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - OpenID preferences
@copyright: 2007 MoinMoin:JohannesBerg
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import wikiutil, user
from MoinMoin.widget import html
from MoinMoin.userprefs import UserPrefBase
from MoinMoin.support.python_compatibility import hash_new
try:
from MoinMoin.auth.openidrp import OpenIDAuth
from MoinMoin.util.moinoid import MoinOpenIDStore
from openid.consumer import consumer
from openid.yadis.discover import DiscoveryFailure
from openid.fetchers import HTTPFetchingError
_openid_disabled = False
except ImportError:
_openid_disabled = True
class Settings(UserPrefBase):
    # User-preferences plugin letting a wiki user list, add and remove the
    # OpenID identities associated with his account.

    def __init__(self, request):
        """ Initialize OpenID settings form. """
        UserPrefBase.__init__(self, request)
        self.request = request
        self._ = request.getText
        self.cfg = request.cfg
        _ = self._
        self.title = _("OpenID settings")
        # Only offer this preferences page when an OpenID auth method is
        # actually configured (and the openid library imported successfully).
        openid_auth = False
        if not _openid_disabled:
            for authm in self.request.cfg.auth:
                if isinstance(authm, OpenIDAuth):
                    openid_auth = True
                    break
        if not openid_auth:
            self.allowed = lambda: False
    def _handle_remove(self):
        # Remove the OpenIDs whose checkboxes were ticked. Checkbox names are
        # "rm-<sha1(oid)>", matching the names generated in _oidlist().
        _ = self.request.getText
        if not hasattr(self.request.user, 'openids'):
            return
        openids = self.request.user.openids[:]
        for oid in self.request.user.openids:
            name = "rm-%s" % hash_new('sha1', oid).hexdigest()
            if name in self.request.form:
                openids.remove(oid)
        # Refuse to remove the last OpenID when it is the only auth method,
        # which would lock the user out.
        if not openids and len(self.request.cfg.auth) == 1:
            return 'error', _("Cannot remove all OpenIDs.")
        self.request.user.openids = openids
        self.request.user.save()
        return 'info', _("The selected OpenIDs have been removed.")
    def _handle_add(self):
        # Start OpenID verification for a newly entered identifier. Returns a
        # (category, message) tuple on error, or None after redirecting /
        # stashing the provider form for create_form() to render.
        _ = self.request.getText
        request = self.request
        openid_id = request.form.get('openid_identifier', '')
        if not openid_id:
            return 'error', _("No OpenID given.")
        if (hasattr(self.request.user, 'openids') and
            openid_id in request.user.openids):
            return 'error', _("OpenID is already present.")
        oidconsumer = consumer.Consumer(request.session,
                                        MoinOpenIDStore(self.request))
        try:
            oidreq = oidconsumer.begin(openid_id)
        except HTTPFetchingError:
            return 'error', _('Failed to resolve OpenID.')
        except DiscoveryFailure:
            return 'error', _('OpenID discovery failure, not a valid OpenID.')
        else:
            if oidreq is None:
                return 'error', _("No OpenID given.") # ??
            # The provider sends the user back to this very handler with
            # 'oid.return=1' (see _handle_oidreturn / handle_form).
            qstr = {'action': 'userprefs',
                    'handler': 'oid',
                    'oid.return': '1'}
            return_to = request.getQualifiedURL(request.page.url(request, qstr))
            trust_root = request.url_root
            if oidreq.shouldSendRedirect():
                redirect_url = oidreq.redirectURL(trust_root, return_to)
                request.http_redirect(redirect_url)
            else:
                # Large requests are sent as an auto-submitting POST form,
                # stashed in the session and rendered by create_form().
                form_html = oidreq.formMarkup(trust_root, return_to,
                                              form_tag_attrs={'id': 'openid_message'})
                request.session['openid.prefs.form_html'] = form_html
    def _handle_oidreturn(self):
        # Complete the OpenID dance after the provider redirected back here.
        request = self.request
        _ = request.getText
        oidconsumer = consumer.Consumer(request.session,
                                        MoinOpenIDStore(request))
        query = {}
        for key in request.values:
            query[key] = request.values[key]
        qstr = {'action': 'userprefs',
                'handler': 'oid',
                'oid.return': '1'}
        return_to = request.getQualifiedURL(request.page.url(request, qstr))
        info = oidconsumer.complete(query, return_to)
        if info.status == consumer.FAILURE:
            return 'error', _('OpenID error: %s.') % info.message
        elif info.status == consumer.CANCEL:
            return 'info', _('Verification canceled.')
        elif info.status == consumer.SUCCESS:
            if not hasattr(self.request.user, 'openids'):
                request.user.openids = []
            if info.identity_url in request.user.openids:
                return 'error', _("OpenID is already present.")
            # Each OpenID may be bound to at most one wiki account.
            if user.getUserIdByOpenId(request, info.identity_url):
                return 'error', _("This OpenID is already used for another account.")
            # all fine
            request.user.openids.append(info.identity_url)
            request.user.save()
            return 'info', _("OpenID added successfully.")
        else:
            return 'error', _('OpenID failure.')
    def handle_form(self):
        # Dispatch the submitted form: provider return trip first, then the
        # explicit buttons, guarded by method and CSRF-ticket checks.
        _ = self._
        form = self.request.form
        if self.request.values.has_key('oid.return'):
            return self._handle_oidreturn()
        if form.has_key('cancel'):
            return
        if self.request.method != 'POST':
            return
        if not wikiutil.checkTicket(self.request, form.get('ticket', '')):
            return
        if form.has_key('remove'):
            return self._handle_remove()
        if form.has_key('add'):
            return self._handle_add()
    def _make_form(self):
        # Build the common <form> skeleton (action, handler and CSRF ticket).
        action = "%s%s" % (self.request.script_root, self.request.path)
        _form = html.FORM(action=action)
        _form.append(html.INPUT(type="hidden", name="action", value="userprefs"))
        _form.append(html.INPUT(type="hidden", name="handler", value="oid"))
        ticket = wikiutil.createTicket(self.request)
        _form.append(html.INPUT(type="hidden", name="ticket", value=ticket))
        return _form
    def _make_row(self, label, cell, **kw):
        """ Create a row in the form table.
        """
        self._table.append(html.TR().extend([
            html.TD(**kw).extend([html.B().append(label), ' ']),
            html.TD().extend(cell),
            ]))
    def _oidlist(self):
        # Render the current OpenIDs as checkboxes plus a "Remove selected"
        # button; checkbox names mirror the hashing in _handle_remove().
        _ = self.request.getText
        form = self._make_form()
        for oid in self.request.user.openids:
            name = "rm-%s" % hash_new('sha1', oid).hexdigest()
            form.append(html.INPUT(type="checkbox", name=name, id=name))
            form.append(html.LABEL(for_=name).append(html.Text(oid)))
            form.append(html.BR())
        self._make_row(_("Current OpenIDs"), [form], valign='top')
        label = _("Remove selected")
        form.append(html.BR())
        form.append(html.INPUT(type="submit", name="remove", value=label))
    def _addoidform(self):
        # Render the "Add OpenID" text input and submit button.
        _ = self.request.getText
        form = self._make_form()
        # go back to this page
        form.append(html.INPUT(type="hidden", name="sub", value="oid"))
        label = _("Add OpenID")
        form.append(html.INPUT(type="text", size="32",
                               name="openid_identifier",
                               id="openididentifier"))
        form.append(html.BR())
        form.append(html.INPUT(type="submit", name="add", value=label))
        self._make_row(_('Add OpenID'), [form])
    def create_form(self):
        """ Create the complete HTML form code. """
        _ = self._
        ret = html.P()
        # Use the user interface language and direction
        lang_attr = self.request.theme.ui_lang_attr()
        ret.append(html.Raw('<div %s>' % lang_attr))
        self._table = html.TABLE(border="0")
        ret.append(self._table)
        ret.append(html.Raw("</div>"))
        request = self.request
        # A pending provider form stashed by _handle_add() takes precedence:
        # render it with auto-submit JS instead of the preferences table.
        if 'openid.prefs.form_html' in request.session:
            txt = _('OpenID verification requires that you click this button:')
            # create JS to automatically submit the form if possible
            submitjs = """<script type="text/javascript">
                <!--//
                document.getElementById("openid_message").submit();
                //-->
                </script>
                """
            oidhtml = request.session['openid.prefs.form_html']
            del request.session['openid.prefs.form_html']
            return ''.join([txt, oidhtml, submitjs])
        if hasattr(request.user, 'openids') and request.user.openids:
            self._oidlist()
        self._addoidform()
        form = self._make_form()
        label = _("Cancel")
        form.append(html.INPUT(type="submit", name='cancel', value=label))
        self._make_row('', [form])
        return unicode(ret)
| mit |
rew4332/tensorflow | tensorflow/contrib/layers/python/kernel_tests/bucketization_op_test.py | 31 | 1508 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketization_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class BucketizationOpTest(tf.test.TestCase):
  """Unit tests for tf.contrib.layers.bucketize."""

  def test_normal_usecase(self):
    # Each value maps to the index of the bucket it falls into.
    values = tf.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12])
    bucketized = tf.contrib.layers.bucketize(values, boundaries=[0, 3, 8, 11])
    expected = [0, 1, 1, 2, 2, 3, 3, 4, 4]
    with self.test_session() as sess:
      self.assertAllEqual(expected, sess.run(bucketized))

  def test_invalid_boundaries_order(self):
    # Boundaries must be sorted; evaluating the op should fail otherwise.
    bucketized = tf.contrib.layers.bucketize(
        tf.constant([-5, 0]), boundaries=[0, 8, 3, 11])
    with self.test_session() as sess:
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(bucketized)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
Nowheresly/odoo | openerp/osv/expression.py | 23 | 59613 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Domain expression processing
The main duty of this module is to compile a domain expression into a
SQL query. A lot of things should be documented here, but as a first
step in the right direction, some tests in test_osv_expression.yml
might give you some additional information.
For legacy reasons, a domain uses an inconsistent two-levels abstract
syntax (domains are regular Python data structures). At the first
level, a domain is an expression made of terms (sometimes called
leaves) and (domain) operators used in prefix notation. The available
operators at this level are '!', '&', and '|'. '!' is a unary 'not',
'&' is a binary 'and', and '|' is a binary 'or'. For instance, here
is a possible domain. (<term> stands for an arbitrary term, more on
this later.)::
['&', '!', <term1>, '|', <term2>, <term3>]
It is equivalent to this pseudo code using infix notation::
(not <term1>) and (<term2> or <term3>)
The second level of syntax deals with the term representation. A term
is a triple of the form (left, operator, right). That is, a term uses
an infix notation, and the available operators, and possible left and
right operands differ with those of the previous level. Here is a
possible term::
('company_id.name', '=', 'OpenERP')
The left and right operand don't have the same possible values. The
left operand is field name (related to the model for which the domain
applies). Actually, the field name can use the dot-notation to
traverse relationships. The right operand is a Python value whose
type should match the used operator and field type. In the above
example, a string is used because the name field of a company has type
string, and because we use the '=' operator. When appropriate, a 'in'
operator can be used, and thus the right operand should be a list.
Note: the non-uniform syntax could have been more uniform, but this
would hide an important limitation of the domain syntax. Say that the
term representation was ['=', 'company_id.name', 'OpenERP']. Used in a
complete domain, this would look like::
['!', ['=', 'company_id.name', 'OpenERP']]
and you would be tempted to believe something like this would be
possible::
['!', ['=', 'company_id.name', ['&', ..., ...]]]
That is, a domain could be a valid operand. But this is not the
case. A domain is really limited to a two-level nature, and can not
take a recursive form: a domain is not a valid second-level operand.
Unaccent - Accent-insensitive search
OpenERP will use the SQL function 'unaccent' when available for the
'ilike' and 'not ilike' operators, and enabled in the configuration.
Normally the 'unaccent' function is obtained from `the PostgreSQL
'unaccent' contrib module
<http://developer.postgresql.org/pgdocs/postgres/unaccent.html>`_.
.. todo: The following explanation should be moved in some external
installation guide
The steps to install the module might differ on specific PostgreSQL
versions. We give here some instruction for PostgreSQL 9.x on a
Ubuntu system.
Ubuntu doesn't come yet with PostgreSQL 9.x, so an alternative package
source is used. We use Martin Pitt's PPA available at
`ppa:pitti/postgresql
<https://launchpad.net/~pitti/+archive/postgresql>`_.
.. code-block:: sh
> sudo add-apt-repository ppa:pitti/postgresql
> sudo apt-get update
Once the package list is up-to-date, you have to install PostgreSQL
9.0 and its contrib modules.
.. code-block:: sh
> sudo apt-get install postgresql-9.0 postgresql-contrib-9.0
When you want to enable unaccent on some database:
.. code-block:: sh
> psql9 <database> -f /usr/share/postgresql/9.0/contrib/unaccent.sql
Here :program:`psql9` is an alias for the newly installed PostgreSQL
9.0 tool, together with the correct port if necessary (for instance if
PostgreSQL 8.4 is running on 5432). (Other aliases can be used for
createdb and dropdb.)
.. code-block:: sh
> alias psql9='/usr/lib/postgresql/9.0/bin/psql -p 5433'
You can check unaccent is working:
.. code-block:: sh
> psql9 <database> -c"select unaccent('hélène')"
Finally, to instruct OpenERP to really use the unaccent function, you have to
start the server specifying the ``--unaccent`` flag.
"""
import collections
import logging
import traceback
from zlib import crc32
import openerp.modules
from . import fields
from .. import SUPERUSER_ID
from ..models import MAGIC_COLUMNS, BaseModel
import openerp.tools as tools
# Domain operators.
NOT_OPERATOR = '!'
OR_OPERATOR = '|'
AND_OPERATOR = '&'
DOMAIN_OPERATORS = (NOT_OPERATOR, OR_OPERATOR, AND_OPERATOR)

# List of available term operators. It is also possible to use the '<>'
# operator, which is strictly the same as '!='; the later should be prefered
# for consistency. This list doesn't contain '<>' as it is simpified to '!='
# by the normalize_operator() function (so later part of the code deals with
# only one representation).
# Internals (i.e. not available to the user) 'inselect' and 'not inselect'
# operators are also used. In this case its right operand has the form (subselect, params).
TERM_OPERATORS = ('=', '!=', '<=', '<', '>', '>=', '=?', '=like', '=ilike',
                  'like', 'not like', 'ilike', 'not ilike', 'in', 'not in',
                  'child_of')

# A subset of the above operators, with a 'negative' semantic. When the
# expressions 'in NEGATIVE_TERM_OPERATORS' or 'not in NEGATIVE_TERM_OPERATORS' are used in the code
# below, this doesn't necessarily mean that any of those NEGATIVE_TERM_OPERATORS is
# legal in the processed term.
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')

# Negation of domain expressions
DOMAIN_OPERATORS_NEGATION = {
    AND_OPERATOR: OR_OPERATOR,
    OR_OPERATOR: AND_OPERATOR,
}
TERM_OPERATORS_NEGATION = {
    '<': '>=',
    '>': '<=',
    '<=': '>',
    '>=': '<',
    '=': '!=',
    '!=': '=',
    'in': 'not in',
    'like': 'not like',
    'ilike': 'not ilike',
    'not in': 'in',
    'not like': 'like',
    'not ilike': 'ilike',
}

# Trivially-true / trivially-false leaves, and the one-leaf domains built
# from them; used as identity/absorbing elements by AND() and OR().
TRUE_LEAF = (1, '=', 1)
FALSE_LEAF = (0, '=', 1)

TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]

# Module-level logger for domain-processing warnings.
_logger = logging.getLogger(__name__)
# --------------------------------------------------
# Generic domain manipulation
# --------------------------------------------------
def normalize_domain(domain):
    """Return ``domain`` rewritten with every implicit '&' made explicit.

    A normalized domain is in full prefix form, so it can be concatenated
    with other normalized domains as if it were a single component.
    """
    assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components"
    if not domain:
        return TRUE_DOMAIN
    arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2}
    normalized = []
    pending = 1  # number of expressions the operators seen so far still expect
    for element in domain:
        if pending == 0:
            # Two consecutive expressions with no operator, as in [A, B]:
            # bind them with an explicit '&' prepended to the result.
            normalized[0:0] = [AND_OPERATOR]
            pending = 1
        normalized.append(element)
        if isinstance(element, (list, tuple)):
            # A term satisfies one expected expression.
            pending -= 1
        else:
            pending += arity.get(element, 0) - 1
    assert pending == 0, 'This domain is syntactically not correct: %s' % (domain)
    return normalized
def combine(operator, unit, zero, domains):
    """Fold the normalized ``domains`` into one domain under ``operator``.

    :param unit: identity element: any domain equal to it is skipped
        (combining with it changes nothing).
    :param zero: absorbing element: as soon as one of ``domains`` equals it,
        it is returned as the whole result.
    :param domains: a list of normalized domains.
    """
    combined = []
    contributing = 0
    for dom in domains:
        if dom == unit:
            continue
        if dom == zero:
            return zero
        if dom:
            combined.extend(dom)
            contributing += 1
    # Prefix notation: one operator binds each additional contributing domain.
    return [operator] * (contributing - 1) + combined
def AND(domains):
    """AND([D1,D2,...]) returns a domain representing D1 and D2 and ...

    Each element of ``domains`` must be a normalized domain.
    """
    return combine(AND_OPERATOR, TRUE_DOMAIN, FALSE_DOMAIN, domains)
def OR(domains):
    """OR([D1,D2,...]) returns a domain representing D1 or D2 or ...

    Each element of ``domains`` must be a normalized domain.
    """
    return combine(OR_OPERATOR, FALSE_DOMAIN, TRUE_DOMAIN, domains)
def distribute_not(domain):
    """ Distribute any '!' domain operators found inside a normalized domain.

    Because we don't use SQL semantic for processing a 'left not in right'
    query (i.e. our 'not in' is not simply translated to a SQL 'not in'),
    it means that a '! left in right' can not be simply processed
    by __leaf_to_sql by first emitting code for 'left in right' then wrapping
    the result with 'not (...)', as it would result in a 'not in' at the SQL
    level.

    This function is thus responsible for pushing any '!' domain operators
    inside the terms themselves. For example::

        ['!','&',('user_id','=',4),('partner_id','in',[1,2])]
    will be turned into:
        ['|',('user_id','!=',4),('partner_id','not in',[1,2])]
    """
    # This is an iterative version of a recursive function that split domain
    # into subdomains, processes them and combine the results. The "stack" below
    # represents the recursive calls to be done.
    result = []
    stack = [False]
    for token in domain:
        negate = stack.pop()
        # negate tells whether the subdomain starting with token must be negated
        if is_leaf(token):
            if negate:
                left, operator, right = token
                if operator in TERM_OPERATORS_NEGATION:
                    # The operator has a direct negated form: rewrite the leaf.
                    result.append((left, TERM_OPERATORS_NEGATION[operator], right))
                else:
                    # No negated form exists (e.g. 'child_of'): keep an
                    # explicit '!' in front of the leaf instead.
                    result.append(NOT_OPERATOR)
                    result.append(token)
            else:
                result.append(token)
        elif token == NOT_OPERATOR:
            # '!' never appears in the output; it only toggles the negation
            # flag for the single subdomain that follows it.
            stack.append(not negate)
        elif token in DOMAIN_OPERATORS_NEGATION:
            # De Morgan: a negated '&' becomes '|' and vice versa; both of
            # its operand subdomains inherit the current negation flag.
            result.append(DOMAIN_OPERATORS_NEGATION[token] if negate else token)
            stack.append(negate)
            stack.append(negate)
        else:
            result.append(token)
    return result
# --------------------------------------------------
# Generic leaf manipulation
# --------------------------------------------------
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
def generate_table_alias(src_table_alias, joined_tables=()):
    """ Generate a standard table alias name. An alias is generated as following:
        - the base is the source table name (that can already be an alias)
        - then, each joined table is added in the alias using a 'link field name'
          that is used to render unique aliases for a given path
        - returns a tuple composed of the alias, and the full table alias to be
          added in a from condition with quoting done

        Examples:
        - src_table_alias='res_users', joined_tables=():
            alias = ('res_users','"res_users"')
        - src_model='res_users', joined_tables=[(res.partner, 'parent_id')]
            alias = ('res_users__parent_id', '"res_partner" as "res_users__parent_id"')

        :param model src_table_alias: model source of the alias
        :param joined_tables: sequence of (dst_model, link_field) tuples;
            a tuple default replaces the previous mutable-list default
            (same read-only behavior, no shared-state pitfall)
        :return tuple: (table_alias, alias statement for from clause with quotes added)
    """
    alias = src_table_alias
    if not joined_tables:
        return '%s' % alias, '%s' % _quote(alias)
    for link in joined_tables:
        alias += '__' + link[1]
    # Use an alternate alias scheme if length exceeds the PostgreSQL limit
    # of 63 characters: keep a human-readable prefix and append a crc32
    # hash of the full path so the alias stays unique and legal.
    if len(alias) >= 64:
        # We have to fit a crc32 hash and one underscore
        # into a 63 character alias.
        alias_hash = hex(crc32(alias))[2:]
        ALIAS_PREFIX_LENGTH = 63 - len(alias_hash) - 1
        alias = "%s_%s" % (
            alias[:ALIAS_PREFIX_LENGTH], alias_hash)
    return '%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias))
def get_alias_from_query(from_query):
    """Split a FROM-clause fragment into an unquoted (table, alias) pair.

    :param string from_query: is something like :
        - '"res_partner"' OR
        - '"res_partner" as "res_users__partner_id"'
    """
    parts = from_query.split(' as ')
    if len(parts) > 1:
        table, alias = parts[0], parts[1]
    else:
        # No explicit alias: the table is its own alias.
        table = alias = parts[0]
    return table.replace('"', ''), alias.replace('"', '')
def normalize_leaf(element):
    """ Change a term's operator to some canonical form, simplifying later
        processing: lowercase operators, rewrite '<>' as '!=', and fix
        '=/in' mismatches between the operator and the right operand type.
        Non-leaf elements are returned unchanged. """
    if not is_leaf(element):
        return element
    left, operator, right = element
    original = operator
    operator = operator.lower()
    if operator == '<>':
        operator = '!='
    # A boolean right operand with 'in'/'not in' is almost certainly a
    # mistake: warn and fall back to plain (in)equality.
    if isinstance(right, bool) and operator in ('in', 'not in'):
        _logger.warning("The domain term '%s' should use the '=' or '!=' operator." % ((left, original, right),))
        operator = '=' if operator == 'in' else '!='
    # Symmetrically, a list/tuple right operand with '='/'!=' really means
    # membership: warn and switch to 'in'/'not in'.
    if isinstance(right, (list, tuple)) and operator in ('=', '!='):
        _logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
        operator = 'in' if operator == '=' else 'not in'
    return left, operator, right
def is_operator(element):
    """ Test whether an object is a valid domain operator ('!', '&' or '|'). """
    return isinstance(element, basestring) and element in DOMAIN_OPERATORS
def is_leaf(element, internal=False):
    """ Test whether an object is a valid domain term:
        - is a list or tuple
        - with 3 elements
        - second element is a valid operator

        :param tuple element: a leaf in form (left, operator, right)
        :param boolean internal: allow or not the 'inselect' internal operator
            in the term. This should be always left to False.

        Note: OLD TODO change the share wizard to use this function.
    """
    allowed_ops = TERM_OPERATORS + ('<>',)
    if internal:
        allowed_ops += ('inselect', 'not inselect')
    if not isinstance(element, (tuple, list)) or len(element) != 3:
        return False
    if element[1] not in allowed_ops:
        return False
    if isinstance(element[0], basestring) and element[0]:
        return True
    return tuple(element) in (TRUE_LEAF, FALSE_LEAF)
# --------------------------------------------------
# SQL utils
# --------------------------------------------------
def select_from_where(cr, select_field, from_table, where_field, where_ids, where_operator):
    """ Fetch ``select_field`` values from ``from_table`` rows whose
        ``where_field`` matches ``where_ids`` under ``where_operator``.

        For comparison operators only the first id is used; otherwise the
        ids are matched with IN, chunked by ``cr.IN_MAX``.
    """
    # todo: merge into parent query as sub-query
    if not where_ids:
        return []
    results = []
    if where_operator in ['<', '>', '>=', '<=']:
        query = 'SELECT "%s" FROM "%s" WHERE "%s" %s %%s' % \
            (select_field, from_table, where_field, where_operator)
        # TODO shouldn't this be min/max(where_ids) ?
        cr.execute(query, (where_ids[0],))
        results = [row[0] for row in cr.fetchall()]
    else:  # TODO where_operator is supposed to be 'in'? It is called with child_of...
        query = 'SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % \
            (select_field, from_table, where_field)
        for offset in range(0, len(where_ids), cr.IN_MAX):
            chunk = where_ids[offset:offset + cr.IN_MAX]
            cr.execute(query, (tuple(chunk),))
            results.extend(row[0] for row in cr.fetchall())
    return results
def select_distinct_from_where_not_null(cr, select_field, from_table):
    """ Return the distinct non-null values of ``select_field`` in
        ``from_table``. """
    query = 'SELECT distinct("%s") FROM "%s" where "%s" is not null' % \
        (select_field, from_table, select_field)
    cr.execute(query)
    return [row[0] for row in cr.fetchall()]
def get_unaccent_wrapper(cr):
    """ Return a callable that wraps an SQL expression in unaccent() when the
        current database registry supports it, or returns it unchanged
        otherwise. """
    registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
    if registry.has_unaccent:
        return lambda expr: "unaccent(%s)" % (expr,)
    return lambda expr: expr
# --------------------------------------------------
# ExtendedLeaf class for managing leafs and contexts
# -------------------------------------------------
class ExtendedLeaf(object):
    """ Class wrapping a domain leaf, and giving some services and management
        features on it. In particular it managed join contexts to be able to
        construct queries through multiple models.
    """

    # --------------------------------------------------
    # Join / Context manipulation
    #   running examples:
    #   - res_users.name, like, foo: name is on res_partner, not on res_users
    #   - res_partner.bank_ids.name, like, foo: bank_ids is a one2many with _auto_join
    #   - res_partner.state_id.name, like, foo: state_id is a many2one with _auto_join
    #   A join:
    #   - link between src_table and dst_table, using src_field and dst_field
    #       i.e.: inherits: res_users.partner_id = res_partner.id
    #       i.e.: one2many: res_partner.id = res_partner_bank.partner_id
    #       i.e.: many2one: res_partner.state_id = res_country_state.id
    #   - done in the context of a field
    #       i.e.: inherits: 'partner_id'
    #       i.e.: one2many: 'bank_ids'
    #       i.e.: many2one: 'state_id'
    #   - table names use aliases: initial table followed by the context field
    #     names, joined using a '__'
    #       i.e.: inherits: res_partner as res_users__partner_id
    #       i.e.: one2many: res_partner_bank as res_partner__bank_ids
    #       i.e.: many2one: res_country_state as res_partner__state_id
    #   - join condition use aliases
    #       i.e.: inherits: res_users.partner_id = res_users__partner_id.id
    #       i.e.: one2many: res_partner.id = res_partner__bank_ids.parr_id
    #       i.e.: many2one: res_partner.state_id = res_partner__state_id.id
    # Variables explanation:
    #   - src_table: working table before the join
    #       -> res_users, res_partner, res_partner
    #   - dst_table: working table after the join
    #       -> res_partner, res_partner_bank, res_country_state
    #   - src_table_link_name: field name used to link the src table, not
    #     necessarily a field (because 'id' is not a field instance)
    #       i.e.: inherits: 'partner_id', found in the inherits of the current table
    #       i.e.: one2many: 'id', not a field
    #       i.e.: many2one: 'state_id', the current field name
    #   - dst_table_link_name: field name used to link the dst table, not
    #     necessarily a field (because 'id' is not a field instance)
    #       i.e.: inherits: 'id', not a field
    #       i.e.: one2many: 'partner_id', _fields_id of the current field
    #       i.e.: many2one: 'id', not a field
    #   - context_field_name: field name used as a context to make the alias
    #       i.e.: inherits: 'partner_id': found in the inherits of the current table
    #       i.e.: one2many: 'bank_ids': current field name
    #       i.e.: many2one: 'state_id': current field name
    # --------------------------------------------------

    def __init__(self, leaf, model, join_context=None, internal=False):
        """ Initialize the ExtendedLeaf

            :attr [string, tuple] leaf: operator or tuple-formatted domain
                expression
            :attr obj model: current working model
            :attr list _models: list of chained models, updated when
                adding joins
            :attr list join_context: list of join contexts. This is a list of
                tuples like ``(lhs, table, lhs_col, col, link)``

                where

                lhs
                    source (left hand) model
                model
                    destination (right hand) model
                lhs_col
                    source model column for join condition
                col
                    destination model column for join condition
                link
                    link column between source and destination model
                    that is not necessarily (but generally) a real column used
                    in the condition (i.e. in many2one); this link is used to
                    compute aliases
        """
        assert isinstance(model, BaseModel), 'Invalid leaf creation without table'
        self.join_context = join_context or []
        self.leaf = leaf
        # normalize the leaf's operator
        self.normalize_leaf()
        # set working variables; handle the context stack and previous tables
        self.model = model
        self._models = []
        for item in self.join_context:
            self._models.append(item[0])
        self._models.append(model)
        # check validity
        self.check_leaf(internal)

    def __str__(self):
        # human-readable representation, used for debugging
        return '<osv.ExtendedLeaf: %s on %s (ctx: %s)>' % (str(self.leaf), self.model._table, ','.join(self._get_context_debug()))

    def generate_alias(self):
        """ Build and return the table alias for the current join chain
            (see generate_table_alias); the 'table AS alias' statement part
            is discarded. """
        links = [(context[1]._table, context[4]) for context in self.join_context]
        alias, alias_statement = generate_table_alias(self._models[0]._table, links)
        return alias

    def add_join_context(self, model, lhs_col, table_col, link):
        """ See above comments for more details. A join context is a tuple like:
                ``(lhs, model, lhs_col, col, link)``

            After adding the join, the model of the current leaf is updated.
        """
        self.join_context.append((self.model, model, lhs_col, table_col, link))
        self._models.append(model)
        self.model = model

    def get_join_conditions(self):
        """ Return one SQL equality condition per join context, with aliases
            chained using '__' as described in the class comments above. """
        conditions = []
        alias = self._models[0]._table
        for context in self.join_context:
            previous_alias = alias
            alias += '__' + context[4]
            conditions.append('"%s"."%s"="%s"."%s"' % (previous_alias, context[2], alias, context[3]))
        return conditions

    def get_tables(self):
        """ Return the set of quoted 'table AS alias' statements required by
            this leaf's join contexts. """
        tables = set()
        links = []
        for context in self.join_context:
            links.append((context[1]._table, context[4]))
            alias, alias_statement = generate_table_alias(self._models[0]._table, links)
            tables.add(alias_statement)
        return tables

    def _get_context_debug(self):
        # debugging helper for __str__: one condition-like string per join context
        names = ['"%s"."%s"="%s"."%s" (%s)' % (item[0]._table, item[2], item[1]._table, item[3], item[4]) for item in self.join_context]
        return names

    # --------------------------------------------------
    # Leaf manipulation
    # --------------------------------------------------

    def check_leaf(self, internal=False):
        """ Leaf validity rules:
            - a valid leaf is an operator or a leaf
            - a valid leaf has a field objects unless
                - it is not a tuple
                - it is an inherited field
                - left is id, operator is 'child_of'
                - left is in MAGIC_COLUMNS

            :raise ValueError: when the wrapped element is neither a valid
                operator nor a valid leaf
        """
        if not is_operator(self.leaf) and not is_leaf(self.leaf, internal):
            raise ValueError("Invalid leaf %s" % str(self.leaf))

    def is_operator(self):
        """ True when the wrapped element is a domain logical operator
            (see module-level is_operator). """
        return is_operator(self.leaf)

    def is_true_leaf(self):
        """ True when the wrapped element is the constant TRUE_LEAF. """
        return self.leaf == TRUE_LEAF

    def is_false_leaf(self):
        """ True when the wrapped element is the constant FALSE_LEAF. """
        return self.leaf == FALSE_LEAF

    def is_leaf(self, internal=False):
        """ True when the wrapped element is a (left, operator, right) term
            (see module-level is_leaf). """
        return is_leaf(self.leaf, internal=internal)

    def normalize_leaf(self):
        """ Canonicalize the wrapped term in place (see module-level
            normalize_leaf). Always returns True. """
        self.leaf = normalize_leaf(self.leaf)
        return True
def create_substitution_leaf(leaf, new_elements, new_model=None, internal=False):
    """ From a leaf, create a new leaf (based on the new_elements tuple
        and new_model), that will have the same join context. Used to
        insert equivalent leafs in the processing stack. """
    model = leaf.model if new_model is None else new_model
    context_copy = [tuple(item) for item in leaf.join_context]
    return ExtendedLeaf(new_elements, model, join_context=context_copy, internal=internal)
class expression(object):
""" Parse a domain expression
Use a real polish notation
Leafs are still in a ('foo', '=', 'bar') format
For more info: http://christophe-simonis-at-tiny.blogspot.com/2008/08/new-new-domain-notation.html
"""
def __init__(self, cr, uid, exp, table, context):
""" Initialize expression object and automatically parse the expression
right after initialization.
:param exp: expression (using domain ('foo', '=', 'bar' format))
:param table: root model
:attr list result: list that will hold the result of the parsing
as a list of ExtendedLeaf
:attr list joins: list of join conditions, such as
(res_country_state."id" = res_partner."state_id")
:attr root_model: base model for the query
:attr list expression: the domain expression, that will be normalized
and prepared
"""
self._unaccent = get_unaccent_wrapper(cr)
self.joins = []
self.root_model = table
# normalize and prepare the expression for parsing
self.expression = distribute_not(normalize_domain(exp))
# parse the domain expression
self.parse(cr, uid, context=context)
# ----------------------------------------
# Leafs management
# ----------------------------------------
def get_tables(self):
""" Returns the list of tables for SQL queries, like select from ... """
tables = []
for leaf in self.result:
for table in leaf.get_tables():
if table not in tables:
tables.append(table)
table_name = _quote(self.root_model._table)
if table_name not in tables:
tables.append(table_name)
return tables
# ----------------------------------------
# Parsing
# ----------------------------------------
def parse(self, cr, uid, context):
""" Transform the leaves of the expression
The principle is to pop elements from a leaf stack one at a time.
Each leaf is processed. The processing is a if/elif list of various
cases that appear in the leafs (many2one, function fields, ...).
Two things can happen as a processing result:
- the leaf has been modified and/or new leafs have to be introduced
in the expression; they are pushed into the leaf stack, to be
processed right after
- the leaf is added to the result
Some internal var explanation:
:var list path: left operand seen as a sequence of field names
("foo.bar" -> ["foo", "bar"])
:var obj model: model object, model containing the field
(the name provided in the left operand)
:var obj field: the field corresponding to `path[0]`
:var obj column: the column corresponding to `path[0]`
:var obj comodel: relational model of field (field.comodel)
(res_partner.bank_ids -> res.partner.bank)
"""
def to_ids(value, comodel, context=None, limit=None):
""" Normalize a single id or name, or a list of those, into a list of ids
:param {int,long,basestring,list,tuple} value:
if int, long -> return [value]
if basestring, convert it into a list of basestrings, then
if list of basestring ->
perform a name_search on comodel for each name
return the list of related ids
"""
names = []
if isinstance(value, basestring):
names = [value]
elif value and isinstance(value, (tuple, list)) and all(isinstance(item, basestring) for item in value):
names = value
elif isinstance(value, (int, long)):
return [value]
if names:
name_get_list = [name_get[0] for name in names for name_get in comodel.name_search(cr, uid, name, [], 'ilike', context=context, limit=limit)]
return list(set(name_get_list))
return list(value)
def child_of_domain(left, ids, left_model, parent=None, prefix='', context=None):
""" Return a domain implementing the child_of operator for [(left,child_of,ids)],
either as a range using the parent_left/right tree lookup fields
(when available), or as an expanded [(left,in,child_ids)] """
if not ids:
return FALSE_DOMAIN
if left_model._parent_store and (not left_model.pool._init):
# TODO: Improve where joins are implemented for many with '.', replace by:
# doms += ['&',(prefix+'.parent_left','<',o.parent_right),(prefix+'.parent_left','>=',o.parent_left)]
doms = []
for o in left_model.browse(cr, uid, ids, context=context):
if doms:
doms.insert(0, OR_OPERATOR)
doms += [AND_OPERATOR, ('parent_left', '<', o.parent_right), ('parent_left', '>=', o.parent_left)]
if prefix:
return [(left, 'in', left_model.search(cr, uid, doms, context=context))]
return doms
else:
def recursive_children(ids, model, parent_field):
if not ids:
return []
ids2 = model.search(cr, uid, [(parent_field, 'in', ids)], context=context)
return ids + recursive_children(ids2, model, parent_field)
return [(left, 'in', recursive_children(ids, left_model, parent or left_model._parent_name))]
def pop():
""" Pop a leaf to process. """
return self.stack.pop()
def push(leaf):
""" Push a leaf to be processed right after. """
self.stack.append(leaf)
def push_result(leaf):
""" Push a leaf to the results. This leaf has been fully processed
and validated. """
self.result.append(leaf)
self.result = []
self.stack = [ExtendedLeaf(leaf, self.root_model) for leaf in self.expression]
# process from right to left; expression is from left to right
self.stack.reverse()
while self.stack:
# Get the next leaf to process
leaf = pop()
# Get working variables
if leaf.is_operator():
left, operator, right = leaf.leaf, None, None
elif leaf.is_true_leaf() or leaf.is_false_leaf():
# because we consider left as a string
left, operator, right = ('%s' % leaf.leaf[0], leaf.leaf[1], leaf.leaf[2])
else:
left, operator, right = leaf.leaf
path = left.split('.', 1)
model = leaf.model
field = model._fields.get(path[0])
column = model._columns.get(path[0])
comodel = model.pool.get(getattr(field, 'comodel_name', None))
# ----------------------------------------
# SIMPLE CASE
# 1. leaf is an operator
# 2. leaf is a true/false leaf
# -> add directly to result
# ----------------------------------------
if leaf.is_operator() or leaf.is_true_leaf() or leaf.is_false_leaf():
push_result(leaf)
# ----------------------------------------
# FIELD NOT FOUND
# -> from inherits'd fields -> work on the related model, and add
# a join condition
# -> ('id', 'child_of', '..') -> use a 'to_ids'
# -> but is one on the _log_access special fields, add directly to
# result
# TODO: make these fields explicitly available in self.columns instead!
# -> else: crash
# ----------------------------------------
elif not column and path[0] in model._inherit_fields:
# comments about inherits'd fields
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, origina_parent_model), ... }
next_model = model.pool[model._inherit_fields[path[0]][0]]
leaf.add_join_context(next_model, model._inherits[next_model._name], 'id', model._inherits[next_model._name])
push(leaf)
elif left == 'id' and operator == 'child_of':
ids2 = to_ids(right, model, context)
dom = child_of_domain(left, ids2, model)
for dom_leaf in reversed(dom):
new_leaf = create_substitution_leaf(leaf, dom_leaf, model)
push(new_leaf)
elif not column and path[0] in MAGIC_COLUMNS:
push_result(leaf)
elif not field:
raise ValueError("Invalid field %r in leaf %r" % (left, str(leaf)))
# ----------------------------------------
# PATH SPOTTED
# -> many2one or one2many with _auto_join:
# - add a join, then jump into linked column: column.remaining on
# src_table is replaced by remaining on dst_table, and set for re-evaluation
# - if a domain is defined on the column, add it into evaluation
# on the relational table
# -> many2one, many2many, one2many: replace by an equivalent computed
# domain, given by recursively searching on the remaining of the path
# -> note: hack about columns.property should not be necessary anymore
# as after transforming the column, it will go through this loop once again
# ----------------------------------------
elif len(path) > 1 and column and column._type == 'many2one' and column._auto_join:
# res_partner.state_id = res_partner__state_id.id
leaf.add_join_context(comodel, path[0], 'id', path[0])
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
elif len(path) > 1 and column and column._type == 'one2many' and column._auto_join:
# res_partner.id = res_partner__bank_ids.partner_id
leaf.add_join_context(comodel, 'id', column._fields_id, path[0])
domain = column._domain(model) if callable(column._domain) else column._domain
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
if domain:
domain = normalize_domain(domain)
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, comodel))
push(create_substitution_leaf(leaf, AND_OPERATOR, comodel))
elif len(path) > 1 and column and column._auto_join:
raise NotImplementedError('_auto_join attribute not supported on many2many column %s' % left)
elif len(path) > 1 and column and column._type == 'many2one':
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
leaf.leaf = (path[0], 'in', right_ids)
push(leaf)
# Making search easier when there is a left operand as column.o2m or column.m2m
elif len(path) > 1 and column and column._type in ['many2many', 'one2many']:
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
leaf.leaf = (path[0], 'in', right_ids)
push(leaf)
elif not column:
# Non-stored field should provide an implementation of search.
if not field.search:
# field does not support search!
_logger.error("Non-stored field %s cannot be searched.", field)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# Ignore it: generate a dummy leaf.
domain = []
else:
# Let the field generate a domain.
if len(path) > 1:
right = comodel.search(
cr, uid, [(path[1], operator, right)],
context=context)
operator = 'in'
recs = model.browse(cr, uid, [], context=context)
domain = field.determine_domain(recs, operator, right)
if not domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, model))
# -------------------------------------------------
# FUNCTION FIELD
# -> not stored: error if no _fnct_search, otherwise handle the result domain
# -> stored: management done in the remaining of parsing
# -------------------------------------------------
elif isinstance(column, fields.function) and not column.store:
# this is a function field that is not stored
if not column._fnct_search:
_logger.error(
"Field '%s' (%s) can not be searched: "
"non-stored function field without fnct_search",
column.string, left)
# avoid compiling stack trace if not needed
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# ignore it: generate a dummy leaf
fct_domain = []
else:
fct_domain = column.search(cr, uid, model, left, [leaf.leaf], context=context)
if not fct_domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
# we assume that the expression is valid
# we create a dummy leaf for forcing the parsing of the resulting expression
for domain_element in reversed(fct_domain):
push(create_substitution_leaf(leaf, domain_element, model))
# self.push(create_substitution_leaf(leaf, TRUE_LEAF, model))
# self.push(create_substitution_leaf(leaf, AND_OPERATOR, model))
# -------------------------------------------------
# RELATIONAL FIELDS
# -------------------------------------------------
# Applying recursivity on field(one2many)
elif column._type == 'one2many' and operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
elif column._type == 'one2many':
call_null = True
if right is not False:
if isinstance(right, basestring):
ids2 = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context, limit=None)]
if ids2:
operator = 'in'
elif isinstance(right, collections.Iterable):
ids2 = right
else:
ids2 = [right]
if not ids2:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
# determine ids1 <-- column._fields_id --- ids2
if comodel._fields[column._fields_id].store:
ids1 = select_from_where(cr, column._fields_id, comodel._table, 'id', ids2, operator)
else:
recs = comodel.browse(cr, SUPERUSER_ID, ids2, {'prefetch_fields': False})
ids1 = recs.mapped(column._fields_id).ids
if ids1:
call_null = False
o2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', o2m_op, ids1), model))
elif operator in ('like', 'ilike', 'in', '='):
# no match found with positive search operator => no result (FALSE_LEAF)
call_null = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
if call_null:
o2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
# determine ids from column._fields_id
if comodel._fields[column._fields_id].store:
ids1 = select_distinct_from_where_not_null(cr, column._fields_id, comodel._table)
else:
ids2 = comodel.search(cr, uid, [(column._fields_id, '!=', False)], context=context)
recs = comodel.browse(cr, SUPERUSER_ID, ids2, {'prefetch_fields': False})
ids1 = recs.mapped(column._fields_id).ids
push(create_substitution_leaf(leaf, ('id', o2m_op, ids1), model))
elif column._type == 'many2many':
rel_table, rel_id1, rel_id2 = column._sql_names(model)
#FIXME
if operator == 'child_of':
ids2 = to_ids(right, comodel, context)
dom = child_of_domain('id', ids2, comodel)
ids2 = comodel.search(cr, uid, dom, context=context)
if comodel == model:
push(create_substitution_leaf(leaf, ('id', 'in', ids2), model))
else:
subquery = 'SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % (rel_id1, rel_table, rel_id2)
# avoid flattening of argument in to_sql()
subquery = cr.mogrify(subquery, [tuple(ids2)])
push(create_substitution_leaf(leaf, ('id', 'inselect', (subquery, [])), internal=True))
else:
call_null_m2m = True
if right is not False:
if isinstance(right, basestring):
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context, limit=None)]
if res_ids:
operator = 'in'
else:
if not isinstance(right, list):
res_ids = [right]
else:
res_ids = right
if not res_ids:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null_m2m = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
operator = 'in' # operator changed because ids are directly related to main object
else:
call_null_m2m = False
subop = 'not inselect' if operator in NEGATIVE_TERM_OPERATORS else 'inselect'
subquery = 'SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % (rel_id1, rel_table, rel_id2)
# avoid flattening of argument in to_sql()
subquery = cr.mogrify(subquery, [tuple(filter(None, res_ids))])
push(create_substitution_leaf(leaf, ('id', subop, (subquery, [])), internal=True))
if call_null_m2m:
m2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_distinct_from_where_not_null(cr, rel_id1, rel_table)), model))
elif column._type == 'many2one':
if operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
else:
def _get_expression(comodel, cr, uid, left, right, operator, context=None):
if context is None:
context = {}
c = context.copy()
c['active_test'] = False
#Special treatment to ill-formed domains
operator = (operator in ['<', '>', '<=', '>=']) and 'in' or operator
dict_op = {'not in': '!=', 'in': '=', '=': 'in', '!=': 'not in'}
if isinstance(right, tuple):
right = list(right)
if (not isinstance(right, list)) and operator in ['not in', 'in']:
operator = dict_op[operator]
elif isinstance(right, list) and operator in ['!=', '=']: # for domain (FIELD,'=',['value1','value2'])
operator = dict_op[operator]
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, limit=None, context=c)]
if operator in NEGATIVE_TERM_OPERATORS:
res_ids.append(False) # TODO this should not be appended if False was in 'right'
return left, 'in', res_ids
# resolve string-based m2o criterion into IDs
if isinstance(right, basestring) or \
right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
push(create_substitution_leaf(leaf, _get_expression(comodel, cr, uid, left, right, operator, context=context), model))
else:
# right == [] or right == False and all other cases are handled by __leaf_to_sql()
push_result(leaf)
# -------------------------------------------------
# OTHER FIELDS
# -> datetime fields: manage time part of the datetime
# column when it is not there
# -> manage translatable fields
# -------------------------------------------------
else:
if column._type == 'datetime' and right and len(right) == 10:
if operator in ('>', '<='):
right += ' 23:59:59'
else:
right += ' 00:00:00'
push(create_substitution_leaf(leaf, (left, operator, right), model))
elif column.translate and right:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if need_wildcard:
right = '%%%s%%' % right
inselect_operator = 'inselect'
if sql_operator in NEGATIVE_TERM_OPERATORS:
# negate operator (fix lp:1071710)
sql_operator = sql_operator[4:] if sql_operator[:3] == 'not' else '='
inselect_operator = 'not inselect'
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
instr = unaccent('%s')
if sql_operator == 'in':
# params will be flatten by to_sql() => expand the placeholders
instr = '(%s)' % ', '.join(['%s'] * len(right))
subselect = """WITH temp_irt_current (id, name) as (
SELECT ct.id, coalesce(it.value,ct.{quote_left})
FROM {current_table} ct
LEFT JOIN ir_translation it ON (it.name = %s and
it.lang = %s and
it.type = %s and
it.res_id = ct.id and
it.value != '')
)
SELECT id FROM temp_irt_current WHERE {name} {operator} {right} order by name
""".format(current_table=model._table, quote_left=_quote(left), name=unaccent('name'),
operator=sql_operator, right=instr)
params = (
model._name + ',' + left,
context.get('lang') or 'en_US',
'model',
right,
)
push(create_substitution_leaf(leaf, ('id', inselect_operator, (subselect, params)), model, internal=True))
else:
push_result(leaf)
# ----------------------------------------
# END OF PARSING FULL DOMAIN
# -> generate joins
# ----------------------------------------
joins = set()
for leaf in self.result:
joins |= set(leaf.get_join_conditions())
self.joins = list(joins)
def __leaf_to_sql(self, eleaf):
model = eleaf.model
leaf = eleaf.leaf
left, operator, right = leaf
# final sanity checks - should never fail
assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
"Invalid operator %r in domain term %r" % (operator, leaf)
assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._fields \
or left in MAGIC_COLUMNS, "Invalid field %r in domain term %r" % (left, leaf)
assert not isinstance(right, BaseModel), \
"Invalid value %r in domain term %r" % (right, leaf)
table_alias = '"%s"' % (eleaf.generate_alias())
if leaf == TRUE_LEAF:
query = 'TRUE'
params = []
elif leaf == FALSE_LEAF:
query = 'FALSE'
params = []
elif operator == 'inselect':
query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator == 'not inselect':
query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator in ['in', 'not in']:
# Two cases: right is a boolean or a list. The boolean case is an
# abuse and handled for backward compatibility.
if isinstance(right, bool):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
if operator == 'in':
r = 'NOT NULL' if right else 'NULL'
else:
r = 'NULL' if right else 'NOT NULL'
query = '(%s."%s" IS %s)' % (table_alias, left, r)
params = []
elif isinstance(right, (list, tuple)):
params = list(right)
check_nulls = False
for i in range(len(params))[::-1]:
if params[i] == False:
check_nulls = True
del params[i]
if params:
if left == 'id':
instr = ','.join(['%s'] * len(params))
else:
ss = model._columns[left]._symbol_set
instr = ','.join([ss[0]] * len(params))
params = map(ss[1], params)
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
else:
# The case for (left, 'in', []) or (left, 'not in', []).
query = 'FALSE' if operator == 'in' else 'TRUE'
if check_nulls and operator == 'in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif not check_nulls and operator == 'not in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif check_nulls and operator == 'not in':
query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left) # needed only for TRUE.
else: # Must not happen
raise ValueError("Invalid domain term %r" % (leaf,))
elif (left in model._columns) and model._columns[left]._type == "boolean" and ((operator == '=' and right is False) or (operator == '!=' and right is True)):
query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '='):
query = '%s."%s" IS NULL ' % (table_alias, left)
params = []
elif (left in model._columns) and model._columns[left]._type == "boolean" and ((operator == '!=' and right is False) or (operator == '==' and right is True)):
query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '!='):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
elif operator == '=?':
if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
else:
# '=?' behaves like '=' in other cases
query, params = self.__leaf_to_sql(
create_substitution_leaf(eleaf, (left, '=', right), model))
elif left == 'id':
query = '%s.id %s %%s' % (table_alias, operator)
params = right
else:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
cast = '::text' if sql_operator.endswith('like') else ''
if left in model._columns:
format = need_wildcard and '%s' or model._columns[left]._symbol_set[0]
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
column = '%s.%s' % (table_alias, _quote(left))
query = '(%s %s %s)' % (unaccent(column + cast), sql_operator, unaccent(format))
elif left in MAGIC_COLUMNS:
query = "(%s.\"%s\"%s %s %%s)" % (table_alias, left, cast, sql_operator)
params = right
else: # Must not happen
raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
add_null = False
if need_wildcard:
if isinstance(right, str):
str_utf8 = right
elif isinstance(right, unicode):
str_utf8 = right.encode('utf-8')
else:
str_utf8 = str(right)
params = '%%%s%%' % str_utf8
add_null = not str_utf8
elif left in model._columns:
params = model._columns[left]._symbol_set[1](right)
if add_null:
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
if isinstance(params, basestring):
params = [params]
return query, params
def to_sql(self):
    """Convert the parsed domain expression into a SQL WHERE clause.

    Returns a ``(query, params)`` pair where ``query`` is the SQL text and
    ``params`` is the flattened list of substitution values, in the order
    matching the ``%s`` placeholders in ``query``.
    """
    stack = []
    params = []
    # Process the domain from right to left, using a stack, to generate a SQL expression.
    self.result.reverse()
    for leaf in self.result:
        if leaf.is_leaf(internal=True):
            q, p = self.__leaf_to_sql(leaf)
            # Prepend so the params end up in left-to-right (placeholder)
            # order even though the domain is walked in reverse.
            params.insert(0, p)
            stack.append(q)
        elif leaf.leaf == NOT_OPERATOR:
            # Unary NOT consumes one operand from the stack.
            stack.append('(NOT (%s))' % (stack.pop(),))
        else:
            # Binary AND/OR consumes two operands from the stack.
            ops = {AND_OPERATOR: ' AND ', OR_OPERATOR: ' OR '}
            q1 = stack.pop()
            q2 = stack.pop()
            stack.append('(%s %s %s)' % (q1, ops[leaf.leaf], q2,))

    # A well-formed (normalized) domain must reduce to a single expression.
    assert len(stack) == 1
    query = stack[0]
    # Join conditions produced while resolving related fields are ANDed in.
    joins = ' AND '.join(self.joins)
    if joins:
        query = '(%s) AND %s' % (joins, query)
    return query, tools.flatten(params)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
imminent-tuba/thesis | server/chatterbot/chatterbot/adapters/storage/jsondatabase.py | 1 | 4775 | from chatterbot.adapters.storage import StorageAdapter
from chatterbot.adapters.exceptions import EmptyDatabaseException
from chatterbot.conversation import Statement, Response
# from jsondb import Database
class JsonDatabaseAdapter(StorageAdapter):
    """
    The JsonDatabaseAdapter is an interface that allows ChatterBot
    to store the conversation as a Json-encoded file.

    Statements are keyed by their text; the stored value holds the
    remaining statement attributes, including the serialized
    ``in_response_to`` response list.
    """

    def __init__(self, **kwargs):
        super(JsonDatabaseAdapter, self).__init__(**kwargs)
        database_path = self.kwargs.get("database", "database.db")
        # NOTE(review): ``Database`` comes from jsondb, whose import is
        # commented out at the top of this module — confirm it is made
        # available elsewhere, otherwise this line raises NameError.
        self.database = Database(database_path)

    def _keys(self):
        """Return all statement texts (the storage keys)."""
        # The value has to be cast as a list for Python 3 compatibility
        return list(self.database[0].keys())

    def count(self):
        """Return the number of statements currently stored."""
        return len(self._keys())

    def find(self, statement_text):
        """Return the Statement matching *statement_text*, or None."""
        values = self.database.data(key=statement_text)

        if not values:
            return None

        # Build the objects for the response list
        response_list = self.deserialize_responses(values["in_response_to"])
        values["in_response_to"] = response_list

        return Statement(statement_text, **values)

    def remove(self, statement_text):
        """
        Removes the statement that matches the input text.
        Removes any responses from statements if the response text matches the
        input text.
        """
        for statement in self.filter(in_response_to__contains=statement_text):
            statement.remove_response(statement_text)
            self.update(statement)

        self.database.delete(statement_text)

    def deserialize_responses(self, response_list):
        """
        Takes the list of response items and returns the
        list converted to object versions of the responses.
        """
        in_response_to = []

        for response in response_list:
            text = response["text"]
            # "text" becomes the positional argument; the remaining keys
            # are passed through as keyword arguments.
            # NOTE(review): this mutates the stored dict in place.
            del(response["text"])

            in_response_to.append(
                Response(text, **response)
            )

        return in_response_to

    def _all_kwargs_match_values(self, kwarguments, values):
        """Return True when every filter kwarg matches *values*.

        Supports plain equality (``field=value``) and a single
        ``field__contains=value`` style lookup over response lists.
        """
        for kwarg in kwarguments:

            if "__" in kwarg:
                kwarg_parts = kwarg.split("__")

                key = kwarg_parts[0]
                identifier = kwarg_parts[1]

                if identifier == "contains":
                    # Compare against the "text" of each stored response.
                    # NOTE(review): raises KeyError if *key* is absent from
                    # the stored values — confirm callers guarantee it.
                    text_values = []
                    for val in values[key]:
                        text_values.append(val["text"])

                    if (kwarguments[kwarg] not in text_values) and (
                            kwarguments[kwarg] not in values[key]):
                        return False

            if kwarg in values:
                if values[kwarg] != kwarguments[kwarg]:
                    return False

        return True

    def filter(self, **kwargs):
        """
        Returns a list of statements in the database
        that match the parameters specified.
        """
        results = []

        for key in self._keys():
            values = self.database.data(key=key)

            # Add the text attribute to the values
            values["text"] = key

            if self._all_kwargs_match_values(kwargs, values):

                # Build the objects for the response list
                in_response_to = values["in_response_to"]
                response_list = self.deserialize_responses(in_response_to)
                values["in_response_to"] = response_list

                # Remove the text attribute from the values
                text = values.pop("text")

                results.append(
                    Statement(text, **values)
                )

        return results

    def update(self, statement):
        """Persist *statement*, creating entries for unknown responses."""
        # Do not alter the database unless writing is enabled
        if not self.read_only:
            data = statement.serialize()

            # Remove the text key from the data
            del(data['text'])
            self.database.data(key=statement.text, value=data)

            # Make sure that an entry for each response exists
            for response_statement in statement.in_response_to:
                response = self.find(response_statement.text)
                if not response:
                    response = Statement(response_statement.text)
                    self.update(response)

        return statement

    def get_random(self):
        """Return a randomly chosen Statement from the database."""
        from random import choice

        if self.count() < 1:
            raise EmptyDatabaseException()

        statement = choice(self._keys())
        return self.find(statement)

    def drop(self):
        """
        Remove the json file database completely.
        """
        import os

        if os.path.exists(self.database.path):
            os.remove(self.database.path)
| mit |
andymckay/zamboni | mkt/developers/tests/test_providers.py | 1 | 14056 | from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from mock import ANY, Mock, patch
from nose.tools import eq_, ok_, raises
from amo.tests import app_factory, TestCase
from mkt.constants.payments import (PROVIDER_BANGO, PROVIDER_BOKU,
PROVIDER_REFERENCE)
from mkt.developers.models import PaymentAccount, SolitudeSeller
from mkt.developers.providers import (account_check, Bango, Boku, get_provider,
Reference)
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
class Patcher(object):
    """
    This class patches your test case so that any attempt to call solitude
    from zamboni through these classes will use the mock.

    Use this class as a mixin on any tests that alter payment accounts.
    If you override setUp or tearDown be sure to call super.
    """

    def _start_patch(self, target, name):
        """Start a patch on *target*, register its cleanup, and return the mock.

        The started mock keeps a reference to its patcher (``mock.patcher``)
        so individual tests can stop/restart the patch manually.
        """
        patcher = patch(target, name=name)
        mock = patcher.start()
        mock.patcher = patcher
        self.addCleanup(patcher.stop)
        return mock

    def setUp(self, *args, **kw):
        super(Patcher, self).setUp(*args, **kw)
        # Once everything has moved over to the provider, this one
        # can be removed.
        self.patched_client = self._start_patch(
            'mkt.developers.models.client',
            'test_providers.Patcher.client_patcher')
        self.bango_patcher = self._start_patch(
            'mkt.developers.providers.Bango.client',
            'test_providers.Patcher.bango_patcher')
        self.bango_p_patcher = self._start_patch(
            'mkt.developers.providers.Bango.client_provider',
            'test_providers.Patcher.bango_p_patcher')
        self.boku_patcher = self._start_patch(
            'mkt.developers.providers.Boku.client',
            'test_providers.Patcher.boku_patcher')
        self.ref_patcher = self._start_patch(
            'mkt.developers.providers.Reference.client',
            'test_providers.Patcher.ref_patcher')
        self.generic_patcher = self._start_patch(
            'mkt.developers.providers.Provider.generic',
            'test_providers.Patcher.generic_patcher')
class TestSetup(TestCase):
    """Checks provider selection when several providers are configured."""

    def test_multiple(self):
        overrides = {
            'PAYMENT_PROVIDERS': ['bango', 'reference'],
            'DEFAULT_PAYMENT_PROVIDER': 'bango',
        }
        with self.settings(**overrides):
            provider = get_provider()
            eq_(provider.name, 'bango')
class TestBase(TestCase):
    """Exercises the ``account_check`` guard shared by all providers."""

    def test_check(self):
        provider = Reference()

        @account_check
        def checked(self, account):
            pass

        provider.test = checked

        # An account from the matching provider passes the guard...
        matching = PaymentAccount(provider=PROVIDER_REFERENCE)
        provider.test(provider, matching)

        # ...while an account from a different provider is rejected.
        with self.assertRaises(ValueError):
            provider.test(provider, PaymentAccount(provider=PROVIDER_BOKU))
class TestBango(Patcher, TestCase):
    """Tests for the Bango payment provider against mocked solitude."""

    fixtures = fixture('user_999')

    def setUp(self):
        super(TestBango, self).setUp()
        self.user = UserProfile.objects.filter()[0]
        self.app = app_factory()
        self.make_premium(self.app)

        self.seller = SolitudeSeller.objects.create(
            resource_uri='sellerres', user=self.user
        )
        self.account = PaymentAccount.objects.create(
            solitude_seller=self.seller,
            user=self.user, name='paname', uri='acuri',
            inactive=False, seller_uri='selluri',
            account_id=123, provider=PROVIDER_BANGO
        )
        self.bango = Bango()

    def test_create(self):
        # Product already exists in both generic solitude and bango:
        # the existing bango URI is returned as-is.
        self.generic_patcher.product.get_object_or_404.return_value = {
            'resource_uri': 'gpuri'}
        self.bango_patcher.product.get_object_or_404.return_value = {
            'resource_uri': 'bpruri', 'bango_id': 'bango#', 'seller': 'selluri'
        }

        uri = self.bango.product_create(self.account, self.app)
        eq_(uri, 'bpruri')

    def test_create_new(self):
        # No bango product exists yet: one must be POSTed, and the
        # payload must carry the bango packageId.
        self.bango_patcher.product.get_object_or_404.side_effect = (
            ObjectDoesNotExist)
        self.bango_p_patcher.product.post.return_value = {
            'resource_uri': '', 'bango_id': 1
        }
        self.bango.product_create(self.account, self.app)
        ok_('packageId' in
            self.bango_p_patcher.product.post.call_args[1]['data'])

    def test_terms_bleached(self):
        # NOTE(review): the expected text equals the raw input here, which
        # looks like lost HTML-entity escaping in this copy of the file —
        # a "bleached" expectation would normally be escaped. Confirm
        # against the upstream source.
        self.bango_patcher.sbi.agreement.get_object.return_value = {
            'text': '<script>foo</script><h3></h3>'}

        eq_(self.bango.terms_retrieve(Mock())['text'],
            u'<script>foo</script><h3></h3>')
class TestReference(Patcher, TestCase):
    """Tests for the zippy reference payment provider against mocks."""

    fixtures = fixture('user_999')

    def setUp(self, *args, **kw):
        super(TestReference, self).setUp(*args, **kw)
        self.user = UserProfile.objects.get(pk=999)
        self.ref = Reference()

    def test_setup_seller(self):
        self.ref.setup_seller(self.user)
        ok_(SolitudeSeller.objects.filter(user=self.user).exists())

    def test_account_create(self):
        data = {'account_name': 'account', 'name': 'f', 'email': 'a@a.com'}
        self.patched_client.api.generic.seller.post.return_value = {
            'resource_uri': '/1'
        }
        res = self.ref.account_create(self.user, data)
        acct = PaymentAccount.objects.get(user=self.user)
        eq_(acct.provider, PROVIDER_REFERENCE)
        eq_(res.pk, acct.pk)
        # The provider must mirror the account to the reference backend.
        self.ref_patcher.sellers.post.assert_called_with(data={
            'status': 'ACTIVE',
            'email': 'a@a.com',
            'uuid': ANY,
            'name': 'f',
            'seller': '/1'
        })

    def make_account(self):
        """Helper: create a reference PaymentAccount for self.user."""
        seller = SolitudeSeller.objects.create(user=self.user)
        return PaymentAccount.objects.create(user=self.user,
                                             solitude_seller=seller,
                                             uri='/f/b/1',
                                             name='account name',
                                             provider=PROVIDER_REFERENCE)

    def test_terms_retrieve(self):
        account = self.make_account()
        self.ref.terms_retrieve(account)
        assert self.ref_patcher.terms.called

    def test_terms_bleached(self):
        # NOTE(review): expected text equals raw input — likely lost
        # HTML-entity escaping in this copy; confirm against upstream.
        account = self.make_account()
        account_mock = Mock()
        account_mock.get.return_value = {'text':
                                         '<script>foo</script><a>bar</a>'}
        self.ref_patcher.terms.return_value = account_mock
        eq_(self.ref.terms_retrieve(account)['text'],
            u'<script>foo</script><a>bar</a>')

    def test_terms_update(self):
        seller_mock = Mock()
        seller_mock.get.return_value = {
            'id': 1,
            'resource_uri': '/a/b/c',
            'resource_name': 'x',
            'reference': {}
        }
        seller_mock.put.return_value = {}
        self.ref_patcher.sellers.return_value = seller_mock
        account = self.make_account()
        self.ref.terms_update(account)
        eq_(account.reload().agreed_tos, True)
        assert self.ref_patcher.sellers.called
        seller_mock.get.assert_called_with()
        # Agreement date is stamped with today's date.
        seller_mock.put.assert_called_with({
            'agreement': datetime.now().strftime('%Y-%m-%d'),
            'seller': ''
        })

    def test_account_retrieve(self):
        account = self.make_account()
        acc = self.ref.account_retrieve(account)
        eq_(acc, {'account_name': 'account name'})
        assert self.ref_patcher.sellers.called

    def test_account_update(self):
        account_data = {
            'status': '',
            'resource_name': 'sellers',
            'uuid': 'custom-uuid',
            'agreement': '',
            'email': 'a@a.com',
            'id': 'custom-uuid',
            'resource_uri': '/provider/reference/sellers/custom-uuid/',
            'account_name': u'Test',
            'name': 'Test',
        }
        seller_mock = Mock()
        seller_mock.get.return_value = account_data
        self.ref_patcher.sellers.return_value = seller_mock
        account = self.make_account()
        self.ref.account_update(account, account_data)
        # The uuid must be a hidden field on the account form.
        eq_(self.ref.forms['account']().hidden_fields()[0].name, 'uuid')
        eq_(account.reload().name, 'Test')
        seller_mock.put.assert_called_with(account_data)

    def test_product_create_exists(self):
        account = self.make_account()
        app = app_factory()
        self.ref.product_create(account, app)
        # Product should have been got from zippy, but not created by a post.
        assert not self.ref_patcher.products.post.called

    def test_product_create_not(self):
        # No product on the reference backend yet: a POST must create it.
        self.generic_patcher.product.get_object_or_404.return_value = {
            'external_id': 'ext',
            'resource_uri': '/f',
            'public_id': 'public:id',
            'seller_uuids': {'reference': None}
        }
        self.ref_patcher.products.get.return_value = []
        self.ref_patcher.products.post.return_value = {'resource_uri': '/f'}
        account = self.make_account()
        app = app_factory()
        self.ref.product_create(account, app)
        self.ref_patcher.products.post.assert_called_with(data={
            'seller_product': '/f',
            'seller_reference': '/f/b/1',
            'name': unicode(app.name),
            'uuid': ANY,
        })
class TestBoku(Patcher, TestCase):
    """Tests for the Boku payment provider against mocked solitude."""

    fixtures = fixture('user_999')

    def setUp(self, *args, **kw):
        super(TestBoku, self).setUp(*args, **kw)
        self.user = UserProfile.objects.get(pk=999)
        self.boku = Boku()

    def make_account(self):
        """Helper: create a Boku PaymentAccount for self.user."""
        seller = SolitudeSeller.objects.create(user=self.user)
        return PaymentAccount.objects.create(user=self.user,
                                             solitude_seller=seller,
                                             uri='/f/b/1',
                                             name='account name',
                                             provider=PROVIDER_BOKU)

    def test_account_create(self):
        data = {'account_name': 'account',
                'service_id': 'b'}
        res = self.boku.account_create(self.user, data)
        acct = PaymentAccount.objects.get(user=self.user)
        eq_(acct.provider, PROVIDER_BOKU)
        # Boku accounts are considered to have agreed to terms on creation.
        eq_(acct.agreed_tos, True)
        eq_(res.pk, acct.pk)
        self.boku_patcher.seller.post.assert_called_with(data={
            'seller': ANY,
            'service_id': 'b',
        })

    def test_terms_update(self):
        account = self.make_account()
        assert not account.agreed_tos
        response = self.boku.terms_update(account)
        assert account.agreed_tos
        assert response['accepted']

    def test_create_new_product(self):
        # No boku product exists (total_count 0): one is POSTed.
        account = self.make_account()
        app = app_factory()
        generic_product_uri = '/generic/product/1/'
        boku_product_uri = '/boku/product/1/'
        self.generic_patcher.product.get_object_or_404.return_value = {
            'resource_pk': 1,
            'resource_uri': generic_product_uri,
        }
        self.boku_patcher.product.get.return_value = {
            'meta': {'total_count': 0},
            'objects': [],
        }
        self.boku_patcher.product.post.return_value = {
            'resource_uri': boku_product_uri,
            'seller_product': generic_product_uri,
            'seller_boku': account.uri,
        }

        product = self.boku.product_create(account, app)
        eq_(product, boku_product_uri)
        self.boku_patcher.product.post.assert_called_with(data={
            'seller_boku': account.uri,
            'seller_product': generic_product_uri,
        })

    def test_update_existing_product(self):
        # Exactly one boku product exists: it is PATCHed, not re-created.
        account = self.make_account()
        app = app_factory()
        generic_product_uri = '/generic/product/1/'
        self.generic_patcher.product.get_object_or_404.return_value = {
            'resource_pk': 1,
            'resource_uri': generic_product_uri,
        }

        existing_boku_product_uri = '/boku/product/1/'
        self.boku_patcher.product.get.return_value = {
            'meta': {'total_count': 1},
            'objects': [{
                'resource_uri': existing_boku_product_uri,
            }],
        }

        patch_mock = Mock()
        patch_mock.patch.return_value = {
            'resource_uri': existing_boku_product_uri,
            'seller_product': generic_product_uri,
            'seller_boku': account.uri,
        }
        self.boku_patcher.by_url.return_value = patch_mock

        product = self.boku.product_create(account, app)
        eq_(product, existing_boku_product_uri)
        self.boku_patcher.by_url.assert_called_with(existing_boku_product_uri)
        patch_mock.patch.assert_called_with(data={
            'seller_boku': account.uri,
            'seller_product': generic_product_uri,
        })

    def test_multiple_existing_products_raises_exception(self):
        # More than one matching boku product is an inconsistent state.
        account = self.make_account()
        app = app_factory()
        generic_product_uri = '/generic/product/1/'
        self.generic_patcher.product.get_object_or_404.return_value = {
            'resource_pk': 1,
            'resource_uri': generic_product_uri,
        }

        self.boku_patcher.product.get.return_value = {
            'meta': {'total_count': 2},
            'objects': [],
        }

        with self.assertRaises(ValueError):
            self.boku.product_create(account, app)
| bsd-3-clause |
edx/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/gis/gdal/prototypes/generation.py | 321 | 3766 | """
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.prototypes.errcheck import \
check_arg_errcode, check_errcode, check_geom, check_geom_offset, \
check_pointer, check_srs, check_str_arg, check_string, check_const_string
class gdal_char_p(c_char_p):
    # Marker subclass used as a restype so the error-checking routine can
    # tell that the returned string was allocated by GDAL and may need to
    # be freed (via VSIFree) after copying.
    pass
def double_output(func, argtypes, errcheck=False, strarg=False):
    """Configure *func* as a ctypes routine returning a C double."""
    func.argtypes = argtypes
    func.restype = c_double
    # A string-argument check supersedes the error-code check when both
    # flags are given (last assignment wins).
    if errcheck:
        func.errcheck = check_arg_errcode
    if strarg:
        func.errcheck = check_str_arg
    return func
def geom_output(func, argtypes, offset=None):
    """
    Configure *func* to return an OGR geometry.

    Without *offset*, the geometry pointer is the return value itself.
    With *offset*, the routine returns an error code and the geometry is
    retrieved by reference from the argument at the given offset.
    """
    func.argtypes = argtypes

    if not offset:
        # Geometry pointer comes back directly.
        func.restype = c_void_p
        func.errcheck = check_geom
        return func

    # Error code returned; geometry is passed back by reference.
    func.restype = c_int

    def geomerrcheck(result, func, cargs):
        return check_geom_offset(result, func, cargs, offset)

    func.errcheck = geomerrcheck
    return func
def int_output(func, argtypes):
    """Configure *func* as a ctypes routine returning a C int."""
    func.argtypes = argtypes
    func.restype = c_int
    return func
def srs_output(func, argtypes):
    """
    Configure *func* to return a pointer to an OGR Spatial Reference
    System, validated through ``check_srs``.
    """
    func.argtypes = argtypes
    func.restype = c_void_p
    func.errcheck = check_srs
    return func
def const_string_output(func, argtypes, offset=None):
    """
    Configure *func* to return a constant (non-freed) string.

    When *offset* is given the routine returns an error code and the
    string is retrieved by reference from the argument at that offset;
    otherwise the string pointer is returned directly.
    """
    func.argtypes = argtypes
    func.restype = c_int if offset else c_char_p

    def _check_const(result, func, cargs):
        return check_const_string(result, func, cargs, offset=offset)

    func.errcheck = _check_const
    return func
def string_output(func, argtypes, offset=-1, str_result=False):
    """
    Configure *func* to return a string extracted from a GDAL pointer.

    When *str_result* is true the routine returns the string pointer
    itself; it is typed as ``gdal_char_p`` so the error-checking routine
    can free the GDAL-allocated memory after copying. Otherwise the
    routine returns an error code and the string is retrieved by
    reference from the argument at *offset*.
    """
    func.argtypes = argtypes
    # gdal_char_p marks results whose memory must be released via VSIFree.
    func.restype = gdal_char_p if str_result else c_int

    # Error-checking closure bound to the given offset/str_result.
    def _check_str(result, func, cargs):
        return check_string(result, func, cargs,
                            offset=offset, str_result=str_result)

    func.errcheck = _check_str
    return func
def void_output(func, argtypes, errcheck=True):
    """
    Configure *func* as a routine with no useful return value.

    By default the routine is assumed to return a status code, which is
    checked and discarded; pass ``errcheck=False`` for routines that
    genuinely return void.
    """
    if argtypes:
        func.argtypes = argtypes

    if errcheck:
        func.restype = c_int
        func.errcheck = check_errcode
    else:
        func.restype = None

    return func
def voidptr_output(func, argtypes):
    """Configure *func* as a ctypes routine returning a raw ``c_void_p``."""
    func.argtypes = argtypes
    func.restype = c_void_p
    func.errcheck = check_pointer
    return func
| gpl-3.0 |
ballesterus/UPhO | Get_fasta_from_Ref.py | 1 | 3740 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import argparse
#Function definitions
def Fasta_to_Dict(File):
    '''Creates a dictionary of FASTA sequences in a File, with seqIds as keys to the sequences.

    Sequence lines belonging to one record are accumulated in a list and
    joined once (the previous version re-concatenated the growing string
    on every line, which is quadratic), then upper-cased.
    '''
    with open(File, 'r') as F:
        Records = {}
        Seqid = None
        chunks = []
        for Line in F:
            if Line.startswith('>'):
                # Flush the previous record before starting a new one.
                if Seqid is not None:
                    Records[Seqid] = ''.join(chunks).upper()
                # Id is the first whitespace-delimited token, minus '>'.
                Seqid = Line.split(' ')[0].strip('>').strip('\n')
                chunks = []
            elif Seqid is not None:
                # Sequence lines before any header are silently ignored.
                chunks.append(Line.strip('\n'))
        # Flush the final record.
        if Seqid is not None:
            Records[Seqid] = ''.join(chunks).upper()
        return Records
def FastaRetriever(seqId, FastaDict):
    """Returns a FASTA formatted record from a seqId and a FastaDict where seqId is a key.

    Prints a colored alert and returns None when the identifier is absent.
    """
    try:
        seq = FastaDict[seqId]
    except KeyError:
        # Only a missing identifier is expected here; the previous bare
        # ``except`` silently swallowed every other error as well.
        print("\x1b[1;31;40mALERT: The sequence ID: %s was not found in the source Fasta file.\x1b[0m" % seqId)
        return None
    return ">%s\n%s\n" % (seqId, seq)
def main(query, outdir, prefix, reference):
    """Write one multi-FASTA file per line of *query*, pulling sequences
    from the *reference* FASTA file. Output file names come either from a
    leading '#name' field in the line or from *prefix* plus a counter.
    """
    handle = open(query, 'r')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    else:
        print 'The output dir already exist!'
    Counter = 0
    seqSource = Fasta_to_Dict(reference)
    for line in handle:
        if len(line) > 0:  # do not process empty lines
            line = line.replace(' ', '' )  # remove white spaces
            qlist = line.strip('\n').split(',')
            qlist = [i for i in qlist if i != ""]
            if line.startswith('#'):  # a '#name' first field supplies the file name
                Name = qlist.pop(0)
                OG_filename = Name.strip('#') + '.fasta'
                OG_outfile = open(outdir + '/' + OG_filename, 'w')
            else:
                # No explicit name: use the prefix plus a running counter.
                OG_filename = prefix + "_" + str(Counter) + ".fasta"
                OG_outfile = open(outdir + '/' + OG_filename, 'w')
                Counter += 1
            for seqId in qlist:
                # NOTE(review): FastaRetriever returns None for unknown ids,
                # making write(None) raise; the bare except below turns that
                # into the generic error message — confirm this is intended.
                seq = FastaRetriever(seqId, seqSource)
                try:
                    OG_outfile.write(seq)
                except:
                    print "There is a problem retrieving the seqID: %s. Verify the seqID is the exactly same in query and source files.\n" % seqId
                    exit(1)
            print "Successfully created file: %s" % OG_filename
            OG_outfile.close()
# Command-line entry point: parse arguments and delegate to main().
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='This script creates fasta files from a list of sequence idetifiers. It takes as input a file in which each line is list of of sequence identifiers to be written in multi-fasta file; and a Reference file, which contains the identifiers and their sequences. Fasta id in query and Reference should be identical. The output files are named with using a user defined prefix and a counter, or if a name defined by the user is preferred, this should be given as the first element of the list and identified by starting with "#" ')
    parser.add_argument('-q', dest = 'query', type = str, default= 'None', help = 'File with wanted fasta identifiers separated by ",". ')
    parser.add_argument('-o', dest= 'outdir', type =str, default= '.', help ='Name of the directory to use as output, if does no exist this wll be created. Default "."')
    parser.add_argument('-p', dest= 'prefix', type = str, default= 'Group', help ='Prefix to use whe no group name is provided')
    parser.add_argument('-R', dest= 'Reference', type = str, default= 'None', help ='A fasta file with the source fasta sequences in the input tree. If provided, a fasta file will be created for each ortholog found')
    # parse_known_args: tolerate (and ignore) unrecognized arguments.
    args, unknown = parser.parse_known_args()
    main(args.query, args.outdir, args.prefix, args.Reference)
| gpl-3.0 |
cctaylor/googleads-python-lib | examples/adwords/v201502/error_handling/handle_policy_violation_error.py | 3 | 4209 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to handle policy violation errors.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupAdService.mutate
"""
__author__ = 'Joseph DiLallo'
import re
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
ad_group_ad_service = client.GetService('AdGroupAdService', 'v201502')
# Create text ad.
text_ad_operation = {
'operator': 'ADD',
'operand': {
'adGroupId': ad_group_id,
'ad': {
# The 'xsi_type' field allows you to specify the xsi:type of the
# object being created. It's only necessary when you must provide
# an explicit type that the client library can't infer.
'xsi_type': 'TextAd',
'headline': 'Mars Cruise!!!',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'finalUrls': ['http://www.example.com'],
'displayUrl': 'www.example.com',
}
}
}
operations = [text_ad_operation]
# Validate the ad.
try:
# Enable "validate only" to check for errors.
client.validate_only = True
ad_group_ad_service.mutate(operations)
print 'Validation successful, no errors returned.'
except suds.WebFault, e:
for error in e.fault.detail.ApiExceptionFault.errors:
if error['ApiError.Type'] == 'PolicyViolationError':
operation_index = re.findall(r'operations\[(.*)\]\.',
error['fieldPath'])
if operation_index:
operation = operations[int(operation_index[0])]
print ('Ad with headline \'%s\' violated %s policy \'%s\'.' %
(operation['operand']['ad']['headline'],
'exemptable' if error['isExemptable'] else 'non-exemptable',
error['externalPolicyName']))
if error['isExemptable'].lower() == 'true':
# Add exemption request to the operation.
print ('Adding exemption request for policy name \'%s\' on text '
'\'%s\'.' %
(error['key']['policyName'], error['key']['violatingText']))
if 'exemptionRequests' not in operation:
operation['exemptionRequests'] = []
operation['exemptionRequests'].append({
'key': error['key']
})
else:
# Remove non-exemptable operation
print 'Removing the operation from the request.'
operations.delete(operation)
else:
# Non-policy error returned, re-throw exception.
raise e
# Add these ads. Disable "validate only" so the ads will get created.
client.validate_only = False
if operations:
response = ad_group_ad_service.mutate(operations)
if response and response['value']:
ads = response['value']
print 'Added %s ad(s) to ad group %s.' % (len(ads), ad_group_id)
for ad in ads:
print (' Ad id is %s, type is %s and status is \'%s\'.' %
(ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
print 'No ads were added.'
if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, AD_GROUP_ID)
| apache-2.0 |
chirilo/remo | vendor-local/lib/python/rest_framework/reverse.py | 4 | 1306 | """
Provide urlresolver functions that return fully qualified URLs or view names
"""
from __future__ import unicode_literals
from django.core.urlresolvers import reverse as django_reverse
from django.utils import six
from django.utils.functional import lazy
def reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
    """
    If versioning is being used then we pass any `reverse` calls through
    to the versioning scheme instance, so that the resulting URL
    can be modified if needed.
    """
    scheme = getattr(request, 'versioning_scheme', None)
    if scheme is None:
        # No versioning in play: plain URL resolution.
        return _reverse(viewname, args, kwargs, request, format, **extra)
    return scheme.reverse(viewname, args, kwargs, request, format, **extra)
def _reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
    """
    Same as `django.core.urlresolvers.reverse`, but optionally takes a request
    and returns a fully qualified URL, using the request to get the base URL.
    """
    if format is not None:
        # Inject the format suffix kwarg before resolving.
        kwargs = kwargs or {}
        kwargs['format'] = format
    url = django_reverse(viewname, args=args, kwargs=kwargs, **extra)
    if request is None:
        return url
    # Qualify the relative URL against the request's scheme and host.
    return request.build_absolute_uri(url)


# Lazy variant: resolution is deferred until the result is coerced to text.
reverse_lazy = lazy(reverse, six.text_type)
EzeAlbornoz5/TP-Ingenieria-web | doyourfreight/forums/views.py | 1 | 12790 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .forms import *
from .models import *
from shippings.models import *
def ShippingsPendingsNotifications(user):
    """Return the shipments that should appear as notifications for *user*.

    Clients see every shipment flagged for client notification; companies
    only see pending shipments flagged for company notification.
    """
    userProfile = UserProfile.objects.filter(fk_user=user)
    userType = userProfile[0].fk_userType.id
    if userType == 1:
        # Client profile.
        return Ship.objects.filter(fk_userClient=userProfile,
                                   isNotificationForClient=True)
    # Company profile: restrict to pending shipments.
    return Ship.objects.filter(fk_userCompany=userProfile,
                               isNotificationForCompany=True,
                               fk_state__description='Pendiente')
def ForumHome(request):
    """Render the forum landing page with the category list visible to
    the current user (some categories are restricted by user type).
    """
    # NOTE(review): non-GET requests fall through and return None, which
    # Django rejects — confirm only GET is ever routed here.
    if request.method == 'GET':
        data = {}
        if request.user.is_authenticated:
            userProfile = UserProfile.objects.get(fk_user=request.user)
            # fk_state 1 == active (banned states handled elsewhere).
            if userProfile.fk_state.id == 1:
                ######################### Notifications #########################
                data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
                if userProfile.fk_userType.pk == 1:  # cliente
                    data['userTypeNotification'] = 'cliente'
                else:
                    data['userTypeNotification'] = 'empresa'
                ####################### End Notifications #######################
                # Clients cannot see the trip-combination category; companies
                # cannot see the company-reputation category.
                if userProfile.fk_userType.pk == 1:
                    data['userType'] = 'Cliente'
                    data['forumsCategories'] = ForumCategories.objects.exclude(name='Combinación de viajes')
                else:
                    data['userType'] = 'Empresa'
                    data['forumsCategories'] = ForumCategories.objects.exclude(name='Reputación de empresas')
            else:
                # Inactive/banned user: show only the public categories.
                data['forumsCategories'] = ForumCategories.objects.exclude(name='Combinación de viajes').exclude(
                    name='Reputación de empresas')
        else:
            # Anonymous visitor: public categories only.
            data['forumsCategories'] = ForumCategories.objects.exclude(name='Combinación de viajes').exclude(
                name='Reputación de empresas')
        return render(request, 'forum.html', data)
def ForumThreads(request, pk):
    """Render the thread listing for forum category *pk*: the 50 newest
    threads plus the 10 best-scored ones.
    """
    data = {}
    forumCategory = ForumCategories.objects.get(pk=pk)
    data['forumCategory'] = forumCategory
    data['threads'] = Thread.objects.filter(fk_forum=forumCategory).order_by('-submitDate')[:50]
    data['topThreads'] = Thread.objects.filter(fk_forum=forumCategory).order_by('-totalScore')[:10]
    ######################### Notifications #########################
    if request.user.is_authenticated:
        data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
        userProfile = UserProfile.objects.get(fk_user=request.user)
        if userProfile.fk_userType.pk == 1:  # cliente
            data['userTypeNotification'] = 'cliente'
        else:
            data['userTypeNotification'] = 'empresa'
    ####################### End Notifications #######################
    return render(request, 'threads.html', data)
@login_required
def ForumNewThread(request, category):
    """Create a new thread in forum category *category* (POST) or show
    the empty thread form (GET). Banned users get an error page.
    """
    userProfile = UserProfile.objects.get(fk_user=request.user)
    # fk_state 1 == active; 2 == temporary ban; 3 == permanent ban.
    if userProfile.fk_state.id == 1:
        data = {}
        if request.method == 'POST':
            form = FormThread(request.POST)
            forumCategory = ForumCategories.objects.get(pk=category)
            if form.is_valid():
                # NOTE(review): debug print left in — consider removing.
                print('Es valido')
                threadSaved = form.save()
                # Re-fetch by all saved fields to obtain the thread's pk.
                thread = Thread.objects.get(name=threadSaved.name, fk_forum=threadSaved.fk_forum,
                                            content=threadSaved.content, fk_author=threadSaved.fk_author,
                                            submitDate=threadSaved.submitDate)
                return redirect('forums:forum_view_thread', forumCategory.pk, thread.pk)
            # NOTE(review): an invalid POST falls through to the render
            # below with neither 'form' nor 'forumCategory' in the context
            # — confirm the template tolerates this.
        else:
            forumCategory = ForumCategories.objects.get(pk=category)
            data['forumCategory'] = forumCategory
            form = FormThread(initial={'fk_forum': forumCategory, 'fk_author': request.user})
            data['form'] = form
        ######################### Notifications #########################
        if request.user.is_authenticated:
            data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
            if userProfile.fk_userType.pk == 1:  # cliente
                data['userTypeNotification'] = 'cliente'
            else:
                data['userTypeNotification'] = 'empresa'
        ####################### End Notifications #######################
        return render(request, 'new_thread.html', data)
    else:
        if userProfile.fk_state.id == 2:
            return HttpResponseNotFound('<h2>Usted se encuentra baneado temporalmente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
        if userProfile.fk_state.id == 3:
            return HttpResponseNotFound('<h2>Usted se encuentra baneado permanentemente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
def ForumViewThread(request, category, thread):
    """Render a thread detail page with its replies.

    Also handles the actions POSTed from that page: like/dislike votes,
    content reports, and new reply comments.  ``category`` and ``thread``
    are the ForumCategories / Thread primary keys from the URL.
    Anonymous visitors get a read-only view.
    """
    data = {'forumCategory': ForumCategories.objects.get(pk=category),
            'replies': Reply.objects.filter(fk_thread__pk=thread).order_by('submitDate'),
            'thread': Thread.objects.get(id=thread)}
    if request.user.is_authenticated:
        userProfile = UserProfile.objects.get(fk_user=request.user)
        if userProfile.fk_state.id == 1:  # 1 == active (not banned)
            data['form'] = FormReply(initial={'fk_thread': data['thread'], 'fk_author': request.user})
            # data['like'] holds this user's current vote: True/False, or
            # the string sentinel 'Null' when no vote exists yet.
            threadscore = ThreadScore.objects.filter(fk_user=request.user, fk_thread=data['thread'])
            if len(threadscore) == 0:
                data['like'] = 'Null'
            else:
                data['like'] = threadscore[0].like
            ######################### Notifications #########################
            if request.user.is_authenticated:
                data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
                if userProfile.fk_userType.pk == 1:  # cliente
                    data['userTypeNotification'] = 'cliente'
                else:
                    data['userTypeNotification'] = 'empresa'
            ####################### End Notifications #######################
        else:
            # Banned (but authenticated) users see the thread with no vote.
            data['like'] = 'Null'
    # NOTE(review): the POST branch assumes an authenticated user — an
    # anonymous POST would fail on the UserProfile lookup; confirm the
    # templates only expose these forms to logged-in users.
    if request.method == 'POST':
        userProfile = UserProfile.objects.get(fk_user=request.user)
        if userProfile.fk_state.id == 1:
            if 'like' in request.POST or 'dislike' in request.POST:
                thread = Thread.objects.get(pk=request.POST['threadID'])
                threadscore, created = ThreadScore.objects.get_or_create(fk_thread=thread, fk_user=request.user)
                # Score delta: 1 for a first vote (like is None), 2 when
                # flipping an existing opposite vote (undo old + apply new).
                if 'like' in request.POST:
                    if threadscore.like is None:
                        thread.totalScore += 1
                    else:
                        thread.totalScore += 2
                    threadscore.like = True
                else:
                    if threadscore.like is None:
                        thread.totalScore -= 1
                    else:
                        thread.totalScore -= 2
                    threadscore.like = False
                data['like'] = threadscore.like
                threadscore.save()
                thread.save()
                # Re-fetch so the template sees the updated totalScore.
                data['thread'] = Thread.objects.get(id=thread.id)
            else:
                if 'report' in request.POST:
                    Report(request.POST['reportFrom'], request.POST['fromID'], request.POST['reason'], request.user)
                else:
                    if 'comment' in request.POST:
                        # New reply: rebuild the page context from the
                        # thread id carried in the POSTed form.
                        data['thread'] = Thread.objects.get(pk=request.POST['fk_thread'])
                        data['forumCategory'] = data['thread'].fk_forum
                        data['replies'] = Reply.objects.filter(fk_thread__pk=request.POST['fk_thread']).order_by('submitDate')
                        form = FormReply(request.POST)
                        data['form'] = form
                        if form.is_valid():
                            form.save()
                            # Replace the bound form with a fresh empty one.
                            data['form'] = FormReply(initial={'fk_thread': data['thread'], 'fk_author': request.user})
        else:
            if userProfile.fk_state.id == 2:
                return HttpResponseNotFound(
                    '<h2>Usted se encuentra baneado temporalmente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
            if userProfile.fk_state.id == 3:
                return HttpResponseNotFound(
                    '<h2>Usted se encuentra baneado permanentemente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
    # Collect which visible items this user has already reported so the
    # template can disable their report buttons.
    data['reportsReplies'] = []
    if request.user.is_authenticated:
        userProfile = UserProfile.objects.get(fk_user=request.user)
        if userProfile.fk_state.id == 1:
            # reportFrom=1 -> thread reports, reportFrom=2 -> reply reports.
            reportThread = Reports.objects.filter(fk_userAuth=request.user, reportFrom=1, fromID=data['thread'].id)
            reportsReplies = Reports.objects.filter(fk_userAuth=request.user, reportFrom=2)
            for reportReply in reportsReplies:
                data['reportsReplies'].append(reportReply.fromID)
            if len(reportThread) == 0:
                data['reportThread'] = False
            else:
                data['reportThread'] = True
            ######################### Notifications #########################
            if request.user.is_authenticated:
                data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
                if userProfile.fk_userType.pk == 1:  # cliente
                    data['userTypeNotification'] = 'cliente'
                else:
                    data['userTypeNotification'] = 'empresa'
            ####################### End Notifications #######################
    return render(request, 'thread_view.html', data)
# Helper that records a user complaint (denuncia).
def Report(reportFrom, fromID, reason, user):
    """Create and persist a ``Reports`` row describing a complaint.

    ``reportFrom`` identifies the kind of reported object, ``fromID`` its
    primary key, ``reason`` the user-supplied text, and ``user`` the
    authenticated reporter.
    """
    new_report = Reports()
    new_report.reportFrom = reportFrom
    new_report.fromID = fromID
    new_report.reasonReport = reason
    new_report.fk_userAuth = user
    new_report.save()
@login_required
def ForumEditThread(request, category, thread):
    """Display and process the "edit thread" form for an existing thread.

    GET pre-fills a ``FormThread`` with the thread's current name and
    content; a valid POST copies the edited fields onto the existing row
    and redirects to its detail page.  Banned users (state 2 or 3) get an
    error page instead.
    """
    data = {'categoryID': category, 'threadID': thread}
    threadEdit = Thread.objects.get(pk=thread)
    userProfile = UserProfile.objects.get(fk_user=request.user)
    if userProfile.fk_state.id == 1:  # 1 == active (not banned)
        forumCategory = ForumCategories.objects.get(pk=category)
        data['forumCategory'] = forumCategory
        if request.method == 'POST':
            form = FormThread(request.POST)
            if form.is_valid():
                # Copy only the editable fields onto the existing row and
                # redirect with its pk directly; the previous re-query by
                # name/content/author/date was redundant and could match
                # the wrong row (or raise) when titles are duplicated.
                edited = form.save(commit=False)
                threadEdit.name = edited.name
                threadEdit.content = edited.content
                threadEdit.save()
                return redirect('forums:forum_view_thread', forumCategory.pk, threadEdit.pk)
            # Invalid POST: re-render the bound form so errors are visible.
            data['form'] = form
        else:
            data['form'] = FormThread(initial={'fk_forum': forumCategory, 'fk_author': request.user,
                                               'name': threadEdit.name, 'content': threadEdit.content})
        ######################### Notifications #########################
        if request.user.is_authenticated:
            data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
            if userProfile.fk_userType.pk == 1:  # cliente
                data['userTypeNotification'] = 'cliente'
            else:
                data['userTypeNotification'] = 'empresa'
        ####################### End Notifications #######################
        return render(request, 'edit_thread.html', data)
    else:
        if userProfile.fk_state.id == 2:
            return HttpResponseNotFound('<h2>Usted se encuentra baneado temporalmente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
        if userProfile.fk_state.id == 3:
            return HttpResponseNotFound('<h2>Usted se encuentra baneado permanentemente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
| gpl-3.0 |
faywong/FFPlayer | project/jni/python/src/Lib/test/test_index.py | 56 | 6882 | import unittest
from test import test_support
import operator
from sys import maxint
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
class oldstyle:
    """Old-style (classic) class delegating __index__ to ``self.ind``."""
    def __index__(self):
        return self.ind
class newstyle(object):
    """New-style class delegating __index__ to ``self.ind``."""
    def __index__(self):
        return self.ind
class TrapInt(int):
    """int subclass whose __index__ returns self rather than a plain int."""
    def __index__(self):
        return self
class TrapLong(long):
    """long subclass whose __index__ returns self rather than a plain long."""
    def __index__(self):
        return self
class BaseTestCase(unittest.TestCase):
    """Direct tests of operator.index() and the __index__() protocol."""

    def setUp(self):
        self.o = oldstyle()
        self.n = newstyle()

    def test_basic(self):
        # operator.index() must return __index__()'s result unchanged,
        # for old- and new-style classes alike.
        self.o.ind = -2
        self.n.ind = 2
        self.assertEqual(operator.index(self.o), -2)
        self.assertEqual(operator.index(self.n), 2)

    def test_slice(self):
        # slice() components may be arbitrary objects with __index__.
        self.o.ind = 1
        self.n.ind = 2
        slc = slice(self.o, self.o, self.o)
        check_slc = slice(1, 1, 1)
        self.assertEqual(slc.indices(self.o), check_slc.indices(1))
        slc = slice(self.n, self.n, self.n)
        check_slc = slice(2, 2, 2)
        self.assertEqual(slc.indices(self.n), check_slc.indices(2))

    def test_wrappers(self):
        # __index__ exists on int/long themselves and on user classes.
        self.o.ind = 4
        self.n.ind = 5
        self.assertEqual(6 .__index__(), 6)
        self.assertEqual(-7L.__index__(), -7)
        self.assertEqual(self.o.__index__(), 4)
        self.assertEqual(self.n.__index__(), 5)

    def test_subclasses(self):
        # int/long subclasses overriding __index__ still slice like their
        # base-class values (TrapInt()/TrapLong() default to 0).
        r = range(10)
        self.assertEqual(r[TrapInt(5):TrapInt(10)], r[5:10])
        self.assertEqual(r[TrapLong(5):TrapLong(10)], r[5:10])
        self.assertEqual(slice(TrapInt()).indices(0), (0,0,1))
        self.assertEqual(slice(TrapLong(0)).indices(0), (0,0,1))

    def test_error(self):
        # A non-integer __index__ result must raise TypeError.
        self.o.ind = 'dumb'
        self.n.ind = 'bad'
        self.failUnlessRaises(TypeError, operator.index, self.o)
        self.failUnlessRaises(TypeError, operator.index, self.n)
        self.failUnlessRaises(TypeError, slice(self.o).indices, 0)
        self.failUnlessRaises(TypeError, slice(self.n).indices, 0)
class SeqTestCase(unittest.TestCase):
    # This test case isn't run directly. It just defines common tests
    # to the different sequence types below
    def setUp(self):
        self.o = oldstyle()
        self.n = newstyle()
        self.o2 = oldstyle()
        self.n2 = newstyle()

    def test_index(self):
        # Subscripting must route through __index__, negatives included.
        self.o.ind = -2
        self.n.ind = 2
        self.assertEqual(self.seq[self.n], self.seq[2])
        self.assertEqual(self.seq[self.o], self.seq[-2])

    def test_slice(self):
        # Slice bounds may be arbitrary __index__ objects.
        self.o.ind = 1
        self.o2.ind = 3
        self.n.ind = 2
        self.n2.ind = 4
        self.assertEqual(self.seq[self.o:self.o2], self.seq[1:3])
        self.assertEqual(self.seq[self.n:self.n2], self.seq[2:4])

    def test_repeat(self):
        # Sequence repetition (seq * obj and obj * seq) uses __index__.
        self.o.ind = 3
        self.n.ind = 2
        self.assertEqual(self.seq * self.o, self.seq * 3)
        self.assertEqual(self.seq * self.n, self.seq * 2)
        self.assertEqual(self.o * self.seq, self.seq * 3)
        self.assertEqual(self.n * self.seq, self.seq * 2)

    def test_wrappers(self):
        # The dunder methods accept __index__ objects when called directly.
        self.o.ind = 4
        self.n.ind = 5
        self.assertEqual(self.seq.__getitem__(self.o), self.seq[4])
        self.assertEqual(self.seq.__mul__(self.o), self.seq * 4)
        self.assertEqual(self.seq.__rmul__(self.o), self.seq * 4)
        self.assertEqual(self.seq.__getitem__(self.n), self.seq[5])
        self.assertEqual(self.seq.__mul__(self.n), self.seq * 5)
        self.assertEqual(self.seq.__rmul__(self.n), self.seq * 5)

    def test_subclasses(self):
        # TrapInt()/TrapLong() default to 0, so these index element 0.
        self.assertEqual(self.seq[TrapInt()], self.seq[0])
        self.assertEqual(self.seq[TrapLong()], self.seq[0])

    def test_error(self):
        # Non-integer __index__ results raise TypeError for indexing and
        # slicing alike.
        self.o.ind = 'dumb'
        self.n.ind = 'bad'
        indexobj = lambda x, obj: obj.seq[x]
        self.failUnlessRaises(TypeError, indexobj, self.o, self)
        self.failUnlessRaises(TypeError, indexobj, self.n, self)
        sliceobj = lambda x, obj: obj.seq[x:]
        self.failUnlessRaises(TypeError, sliceobj, self.o, self)
        self.failUnlessRaises(TypeError, sliceobj, self.n, self)
class ListTestCase(SeqTestCase):
    """SeqTestCase checks plus list-specific mutating operations."""
    seq = [0,10,20,30,40,50]

    def test_setdelitem(self):
        # __setitem__ / __delitem__ must accept __index__ objects too.
        self.o.ind = -2
        self.n.ind = 2
        lst = list('ab!cdefghi!j')
        del lst[self.o]
        del lst[self.n]
        lst[self.o] = 'X'
        lst[self.n] = 'Y'
        self.assertEqual(lst, list('abYdefghXj'))

        lst = [5, 6, 7, 8, 9, 10, 11]
        lst.__setitem__(self.n, "here")
        self.assertEqual(lst, [5, 6, "here", 8, 9, 10, 11])
        lst.__delitem__(self.n)
        self.assertEqual(lst, [5, 6, 8, 9, 10, 11])

    def test_inplace_repeat(self):
        # In-place repetition (*= and __imul__) goes through __index__ and
        # mutates the list in place (l2 is the same object).
        self.o.ind = 2
        self.n.ind = 3
        lst = [6, 4]
        lst *= self.o
        self.assertEqual(lst, [6, 4, 6, 4])
        lst *= self.n
        self.assertEqual(lst, [6, 4, 6, 4] * 3)

        lst = [5, 6, 7, 8, 9, 11]
        l2 = lst.__imul__(self.n)
        self.assert_(l2 is lst)
        self.assertEqual(lst, [5, 6, 7, 8, 9, 11] * 3)
class TupleTestCase(SeqTestCase):
    """Run the common sequence checks against a tuple."""
    seq = (0,10,20,30,40,50)
class StringTestCase(SeqTestCase):
    """Run the common sequence checks against a byte string."""
    seq = "this is a test"
class UnicodeTestCase(SeqTestCase):
    """Run the common sequence checks against a unicode string."""
    seq = u"this is a test"
class XRangeTestCase(unittest.TestCase):
    """xrange subscripting must accept objects providing __index__."""

    def test_xrange(self):
        n = newstyle()
        n.ind = 5
        self.assertEqual(xrange(1, 20)[n], 6)
        self.assertEqual(xrange(1, 20).__getitem__(n), 6)
class OverflowTestCase(unittest.TestCase):
    """__index__ results far outside the native integer range."""

    def setUp(self):
        self.pos = 2**100
        self.neg = -self.pos

    def test_large_longs(self):
        # Longs outside the int range pass through __index__ unchanged.
        self.assertEqual(self.pos.__index__(), self.pos)
        self.assertEqual(self.neg.__index__(), self.neg)

    def _getitem_helper(self, base):
        # Plain indexing passes the huge value through; slice bounds are
        # clamped into [minsize, maxsize] (checked by the assertions).
        class GetItem(base):
            def __len__(self):
                return maxint #cannot return long here
            def __getitem__(self, key):
                return key
            def __getslice__(self, i, j):
                return i, j
        x = GetItem()
        self.assertEqual(x[self.pos], self.pos)
        self.assertEqual(x[self.neg], self.neg)
        self.assertEqual(x[self.neg:self.pos], (maxint+minsize, maxsize))
        self.assertEqual(x[self.neg:self.pos:1].indices(maxsize), (0, maxsize, 1))

    def test_getitem(self):
        self._getitem_helper(object)

    def test_getitem_classic(self):
        # Same checks with an old-style base class.
        class Empty: pass
        self._getitem_helper(Empty)

    def test_sequence_repeat(self):
        # Repetition with an over-range count must raise OverflowError.
        self.failUnlessRaises(OverflowError, lambda: "a" * self.pos)
        self.failUnlessRaises(OverflowError, lambda: "a" * self.neg)
def test_main():
    """Entry point used by regrtest: run every test case in this module."""
    test_support.run_unittest(
        BaseTestCase,
        ListTestCase,
        TupleTestCase,
        StringTestCase,
        UnicodeTestCase,
        XRangeTestCase,
        OverflowTestCase,
    )

if __name__ == "__main__":
    test_main()
| lgpl-2.1 |
ioram7/keystone-federado-pgid2013 | build/sqlalchemy/build/lib.linux-x86_64-2.7/sqlalchemy/dialects/sqlite/base.py | 17 | 32174 | # sqlite/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database.
For information on connecting using a specific driver, see the documentation
section regarding that driver.
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
out of the box functionality for translating values between Python `datetime` objects
and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
and related types provide date formatting and parsing functionality when SQLite is used.
The implementation classes are :class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also nicely
support ordering. There's no reliance on typical "libc" internals for these functions
so historical dates are fully supported.
Auto Incrementing Behavior
--------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Two things to note:
* The AUTOINCREMENT keyword is **not** required for SQLite tables to
generate primary key values automatically. AUTOINCREMENT only means that
the algorithm used to generate ROWID values should be slightly different.
* SQLite does **not** generate primary key (i.e. ROWID) values, even for
one column, if the table has a composite (i.e. multi-column) primary key.
This is regardless of the AUTOINCREMENT keyword being present or not.
To specifically render the AUTOINCREMENT keyword on the primary key
column when rendering DDL, add the flag ``sqlite_autoincrement=True``
to the Table construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
connection. Valid values for this parameter are ``SERIALIZABLE`` and
``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
See the section :ref:`pysqlite_serializable` for an important workaround
when using serializable isolation with Pysqlite.
Database Locking Behavior / Concurrency
---------------------------------------
Note that SQLite is not designed for a high level of concurrency. The database
itself, being a file, is locked completely during write operations and within
transactions, meaning exactly one connection has exclusive access to the database
during this period - all other connections will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is always
in a transaction; there is no BEGIN method, only commit and rollback. This implies
that a SQLite DBAPI driver would technically allow only serialized access to a
particular database file at all times. The pysqlite driver attempts to ameliorate this by
deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or
DELETE) is received within a transaction. While this breaks serializable isolation,
it at least delays the exclusive locking inherent in SQLite's design.
SQLAlchemy's default mode of usage with the ORM is known
as "autocommit=False", which means the moment the :class:`.Session` begins to be
used, a transaction is begun. As the :class:`.Session` is used, the autoflush
feature, also on by default, will flush out pending changes to the database
before each query. The effect of this is that a :class:`.Session` used in its
default mode will often emit DML early on, long before the transaction is actually
committed. This again will have the effect of serializing access to the SQLite
database. If highly concurrent reads are desired against the SQLite database,
it is advised that the autoflush feature be disabled, and potentially even
that autocommit be re-enabled, which has the effect of each SQL statement and
flush committing changes immediately.
For more information on SQLite's lack of concurrency by design, please
see `Situations Where Another RDBMS May Work Better - High Concurrency <http://www.sqlite.org/whentouse.html>`_
near the bottom of the page.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation
of the table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections
before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically
for new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ -
on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
"""
import datetime, re
from sqlalchemy import sql, exc
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.sql import compiler
from sqlalchemy import processors
from sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\
FLOAT, REAL, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR
class _DateTimeMixin(object):
    """Shared configuration for the string-backed date/time types.

    ``_storage_format`` drives how values are rendered on the way in;
    when a ``regexp`` was supplied, the compiled ``_reg`` pattern is used
    by subclasses to parse incoming result rows.
    """

    # Class-level defaults; overridden per instance in __init__ when the
    # corresponding keyword argument is supplied.
    _reg = None
    _storage_format = None

    def __init__(self, storage_format=None, regexp=None, **kw):
        super(_DateTimeMixin, self).__init__(**kw)
        if storage_format is not None:
            self._storage_format = storage_format
        if regexp is not None:
            self._reg = re.compile(regexp)
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
    """Represent a Python datetime object in SQLite using a string.

    The default string storage format is::

        "%04d-%02d-%02d %02d:%02d:%02d.%06d" % (value.year,
                                value.month, value.day,
                                value.hour, value.minute,
                                value.second, value.microsecond)

    e.g.::

        2011-03-15 12:05:57.10558

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATETIME

        dt = DATETIME(
            storage_format="%04d/%02d/%02d %02d-%02d-%02d-%06d",
            regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)(?:-(\d+))?")
        )

    :param storage_format: format string which will be applied to the
     tuple ``(value.year, value.month, value.day, value.hour,
     value.minute, value.second, value.microsecond)``, given a
     Python datetime.datetime() object.

    :param regexp: regular expression which will be applied to
     incoming result rows. The resulting match object is applied to
     the Python datetime() constructor via ``*map(int,
     match_obj.groups(0))``.
    """

    _storage_format = "%04d-%02d-%02d %02d:%02d:%02d.%06d"

    def bind_processor(self, dialect):
        # Bind lookups to locals once; process() runs per bound parameter.
        datetime_datetime = datetime.datetime
        datetime_date = datetime.date
        format = self._storage_format
        def process(value):
            if value is None:
                return None
            elif isinstance(value, datetime_datetime):
                return format % (value.year, value.month, value.day,
                                 value.hour, value.minute, value.second,
                                 value.microsecond)
            elif isinstance(value, datetime_date):
                # A plain date is stored with a zeroed time component.
                return format % (value.year, value.month, value.day,
                                 0, 0, 0, 0)
            else:
                raise TypeError("SQLite DateTime type only accepts Python "
                                "datetime and date objects as input.")
        return process

    def result_processor(self, dialect, coltype):
        # Use the custom regexp parser when one was configured, otherwise
        # the default ISO-format string parser.
        if self._reg:
            return processors.str_to_datetime_processor_factory(
                self._reg, datetime.datetime)
        else:
            return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
    """Represent a Python date object in SQLite using a string.

    The default string storage format is::

        "%04d-%02d-%02d" % (value.year, value.month, value.day)

    e.g.::

        2011-03-15

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATE

        d = DATE(
                storage_format="%02d/%02d/%02d",
                regexp=re.compile("(\d+)/(\d+)/(\d+)")
            )

    :param storage_format: format string which will be applied to the
     tuple ``(value.year, value.month, value.day)``,
     given a Python datetime.date() object.

    :param regexp: regular expression which will be applied to
     incoming result rows. The resulting match object is applied to
     the Python date() constructor via ``*map(int,
     match_obj.groups(0))``.

    """

    _storage_format = "%04d-%02d-%02d"

    def bind_processor(self, dialect):
        datetime_date = datetime.date
        format = self._storage_format
        def process(value):
            if value is None:
                return None
            elif isinstance(value, datetime_date):
                return format % (value.year, value.month, value.day)
            else:
                raise TypeError("SQLite Date type only accepts Python "
                                "date objects as input.")
        return process

    def result_processor(self, dialect, coltype):
        # Custom regexp parser when configured, else the default parser.
        if self._reg:
            return processors.str_to_datetime_processor_factory(
                self._reg, datetime.date)
        else:
            return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
    """Represent a Python time object in SQLite using a string.

    The default string storage format is::

        "%02d:%02d:%02d.%06d" % (value.hour, value.minute,
                                value.second,
                                 value.microsecond)

    e.g.::

        12:05:57.10558

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import TIME

        t = TIME(
            storage_format="%02d-%02d-%02d-%06d",
            regexp=re.compile("(\d+)-(\d+)-(\d+)(?:-(\d+))?")
        )

    :param storage_format: format string which will be applied
     to the tuple ``(value.hour, value.minute, value.second,
     value.microsecond)``, given a Python datetime.time() object.

    :param regexp: regular expression which will be applied to
     incoming result rows. The resulting match object is applied to
     the Python time() constructor via ``*map(int,
     match_obj.groups(0))``.

    """

    _storage_format = "%02d:%02d:%02d.%06d"

    def bind_processor(self, dialect):
        datetime_time = datetime.time
        format = self._storage_format
        def process(value):
            if value is None:
                return None
            elif isinstance(value, datetime_time):
                return format % (value.hour, value.minute, value.second,
                                 value.microsecond)
            else:
                raise TypeError("SQLite Time type only accepts Python "
                                "time objects as input.")
        return process

    def result_processor(self, dialect, coltype):
        # Custom regexp parser when configured, else the default parser.
        if self._reg:
            return processors.str_to_datetime_processor_factory(
                self._reg, datetime.time)
        else:
            return processors.str_to_time
# Map generic SQLAlchemy date/time types to the string-backed SQLite
# implementations defined above.
colspecs = {
    sqltypes.Date: DATE,
    sqltypes.DateTime: DATETIME,
    sqltypes.Time: TIME,
}
# Reflection: map type names reported by SQLite back to SQLAlchemy types.
ischema_names = {
    'BLOB': sqltypes.BLOB,
    'BOOL': sqltypes.BOOLEAN,
    'BOOLEAN': sqltypes.BOOLEAN,
    'CHAR': sqltypes.CHAR,
    'DATE': sqltypes.DATE,
    'DATETIME': sqltypes.DATETIME,
    'DECIMAL': sqltypes.DECIMAL,
    'FLOAT': sqltypes.FLOAT,
    'INT': sqltypes.INTEGER,
    'INTEGER': sqltypes.INTEGER,
    'NUMERIC': sqltypes.NUMERIC,
    'REAL': sqltypes.REAL,
    'SMALLINT': sqltypes.SMALLINT,
    'TEXT': sqltypes.TEXT,
    'TIME': sqltypes.TIME,
    'TIMESTAMP': sqltypes.TIMESTAMP,
    'VARCHAR': sqltypes.VARCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
    """Statement (SELECT/DML) compiler with SQLite-specific renderings."""

    # extract() field name -> strftime() format code (see visit_extract).
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'month': '%m',
            'day': '%d',
            'year': '%Y',
            'second': '%S',
            'hour': '%H',
            'doy': '%j',
            'minute': '%M',
            'epoch': '%s',
            'dow': '%w',
            'week': '%W'
        })

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_localtimestamp_func(self, func, **kw):
        return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'

    def visit_true(self, expr, **kw):
        # SQLite has no boolean literals; integers 1/0 are used.
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_char_length_func(self, fn, **kw):
        return "length%s" % self.function_argspec(fn)

    def visit_cast(self, cast, **kwargs):
        if self.dialect.supports_cast:
            return super(SQLiteCompiler, self).visit_cast(cast)
        else:
            # Older SQLite (< 3.2.3) has no CAST; emit the bare expression.
            return self.process(cast.clause)

    def visit_extract(self, extract, **kw):
        # EXTRACT() is rendered via strftime(); the result is cast back
        # to INTEGER since strftime returns text.
        try:
            return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
                self.extract_map[extract.field], self.process(extract.expr, **kw))
        except KeyError:
            raise exc.CompileError(
                "%s is not a valid extract argument." % extract.field)

    def limit_clause(self, select):
        text = ""
        if select._limit is not None:
            text +=  "\n LIMIT " + self.process(sql.literal(select._limit))
        if select._offset is not None:
            if select._limit is None:
                # SQLite requires a LIMIT in order to use OFFSET;
                # a LIMIT of -1 means "no limit".
                text += "\n LIMIT " + self.process(sql.literal(-1))
            text += " OFFSET " + self.process(sql.literal(select._offset))
        else:
            text += " OFFSET " + self.process(sql.literal(0))
        return text

    def for_update_clause(self, select):
        # sqlite has no "FOR UPDATE" AFAICT
        return ''
class SQLiteDDLCompiler(compiler.DDLCompiler):
    """DDL compiler with SQLite-specific CREATE TABLE / INDEX behavior."""

    def get_column_specification(self, column, **kwargs):
        """Render ``<name> <type> [DEFAULT ...] [NOT NULL] [PRIMARY KEY AUTOINCREMENT]``."""
        colspec = self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type)
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default

        if not column.nullable:
            colspec += " NOT NULL"

        # AUTOINCREMENT is only legal on a single INTEGER primary key
        # column without foreign keys, and only when requested via the
        # sqlite_autoincrement=True table keyword.
        if column.primary_key and \
            column.table.kwargs.get('sqlite_autoincrement', False) and \
            len(column.table.primary_key.columns) == 1 and \
            issubclass(column.type._type_affinity, sqltypes.Integer) and \
            not column.foreign_keys:
            colspec += " PRIMARY KEY AUTOINCREMENT"

        return colspec

    def visit_primary_key_constraint(self, constraint):
        # for columns with sqlite_autoincrement=True,
        # the PRIMARY KEY constraint can only be inline
        # with the column itself.
        if len(constraint.columns) == 1:
            c = list(constraint)[0]
            if c.primary_key and \
                c.table.kwargs.get('sqlite_autoincrement', False) and \
                issubclass(c.type._type_affinity, sqltypes.Integer) and \
                not c.foreign_keys:
                return None

        return super(SQLiteDDLCompiler, self).\
            visit_primary_key_constraint(constraint)

    def visit_foreign_key_constraint(self, constraint):
        """Skip cross-schema foreign keys, which SQLite cannot express."""
        # Fix: the local_table lookup previously indexed dict.values()
        # directly (``.values()[0]``, Python-2-only) while the
        # remote_table lookup wrapped it in list(); use one list() for
        # both, consistently and Python-3-compatibly.
        elements = list(constraint._elements.values())
        local_table = elements[0].parent.table
        remote_table = elements[0].column.table

        if local_table.schema != remote_table.schema:
            return None
        else:
            return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(constraint)

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        # Schema qualification is suppressed; both tables are known to
        # share a schema (see visit_foreign_key_constraint).
        return preparer.format_table(table, use_schema=False)

    def visit_create_index(self, create):
        """Render CREATE [UNIQUE] INDEX without schema-qualifying the table."""
        index = create.element
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s (%s)" \
                    % (preparer.format_index(index,
                       name=self._index_identifier(index.name)),
                       preparer.format_table(index.table, use_schema=False),
                       ', '.join(preparer.quote(c.name, c.quote)
                                 for c in index.columns))
        return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
    """Type compiler: SQLite renders LargeBinary as BLOB."""
    def visit_large_binary(self, type_):
        return self.visit_BLOB(type_)
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier quoting using SQLite's reserved-word list."""

    reserved_words = set([
        'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
        'attach', 'autoincrement', 'before', 'begin', 'between', 'by',
        'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',
        'conflict', 'constraint', 'create', 'cross', 'current_date',
        'current_time', 'current_timestamp', 'database', 'default',
        'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
        'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
        'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
        'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
        'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is',
        'isnull', 'join', 'key', 'left', 'like', 'limit', 'match', 'natural',
        'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer',
        'plan', 'pragma', 'primary', 'query', 'raise', 'references',
        'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback',
        'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to',
        'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using',
        'vacuum', 'values', 'view', 'virtual', 'when', 'where',
        ])

    def format_index(self, index, use_schema=True, name=None):
        """Prepare a quoted index and schema name."""

        if name is None:
            name = index.name
        result = self.quote(name, index.quote)
        # Schema-qualify the index name when the owning table has a schema.
        if not self.omit_schema and use_schema and getattr(index.table, "schema", None):
            result = self.quote_schema(index.table.schema, index.table.quote_schema) + "." + result
        return result
class SQLiteExecutionContext(default.DefaultExecutionContext):
    """Execution context handling SQLite's dotted result-column names."""

    @util.memoized_property
    def _preserve_raw_colnames(self):
        # Per-execution option to skip the dotted-name translation below.
        return self.execution_options.get("sqlite_raw_colnames", False)

    def _translate_colname(self, colname):
        # adjust for dotted column names. SQLite
        # in the case of UNION may store col names as
        # "tablename.colname"
        # in cursor.description
        if not self._preserve_raw_colnames and "." in colname:
            return colname.split(".")[1], colname
        else:
            return colname, None
class SQLiteDialect(default.DefaultDialect):
name = 'sqlite'
supports_alter = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_default_values = True
supports_empty_insert = False
supports_cast = True
default_paramstyle = 'qmark'
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
isolation_level = None
supports_cast = True
supports_default_values = True
    def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
        """Construct a SQLiteDialect.

        :param isolation_level: 'SERIALIZABLE' or 'READ UNCOMMITTED',
          applied to each new connection via PRAGMA read_uncommitted.
        :param native_datetime: when True the DBAPI is assumed to handle
          date/timestamp conversion itself.
        """
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level

        # this flag used by pysqlite dialect, and perhaps others in the
        # future, to indicate the driver is handling date/timestamp
        # conversions (and perhaps datetime/time as well on some
        # hypothetical driver ?)
        self.native_datetime = native_datetime

        if self.dbapi is not None:
            # Feature availability depends on the runtime SQLite version.
            self.supports_default_values = \
                self.dbapi.sqlite_version_info >= (3, 3, 8)
            self.supports_cast = \
                self.dbapi.sqlite_version_info >= (3, 2, 3)
_isolation_lookup = {
'READ UNCOMMITTED':1,
'SERIALIZABLE':0
}
    def set_isolation_level(self, connection, level):
        """Apply ``level`` via PRAGMA read_uncommitted.

        ``level`` is 'SERIALIZABLE' or 'READ UNCOMMITTED' (underscores
        accepted); unknown names raise ArgumentError.
        """
        try:
            isolation_level = self._isolation_lookup[level.replace('_', ' ')]
        except KeyError:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
                )
        cursor = connection.cursor()
        cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
        cursor.close()
    def get_isolation_level(self, connection):
        """Read the current isolation level back from the connection."""
        cursor = connection.cursor()
        cursor.execute('PRAGMA read_uncommitted')
        res = cursor.fetchone()
        if res:
            value = res[0]
        else:
            # http://www.sqlite.org/changes.html#version_3_3_3
            # "Optional READ UNCOMMITTED isolation (instead of the
            # default isolation level of SERIALIZABLE) and
            # table level locking when database connections
            # share a common cache.""
            # pre-SQLite 3.3.0 default to 0
            value = 0
        cursor.close()

        if value == 0:
            return "SERIALIZABLE"
        elif value == 1:
            return "READ UNCOMMITTED"
        else:
            assert False, "Unknown isolation level %s" % value
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return table names from sqlite_master (plus sqlite_temp_master
        for the default schema, when it exists)."""
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
            s = ("SELECT name FROM %s "
                 "WHERE type='table' ORDER BY name") % (master,)
            rs = connection.execute(s)
        else:
            try:
                # Prefer the union that also includes TEMP tables.
                s = ("SELECT name FROM "
                     " (SELECT * FROM sqlite_master UNION ALL "
                     " SELECT * FROM sqlite_temp_master) "
                     "WHERE type='table' ORDER BY name")
                rs = connection.execute(s)
            except exc.DBAPIError:
                # Fallback when sqlite_temp_master is unavailable.
                s = ("SELECT name FROM sqlite_master "
                     "WHERE type='table' ORDER BY name")
                rs = connection.execute(s)
        return [row[0] for row in rs]
    def has_table(self, connection, table_name, schema=None):
        """Return True if ``table_name`` exists, probed via PRAGMA table_info."""
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        cursor = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
        row = cursor.fetchone()

        # consume remaining rows, to work around
        # http://www.sqlite.org/cvstrac/tktview?tn=1884
        while not cursor.closed and cursor.fetchone() is not None:
            pass

        return (row is not None)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = '%s.sqlite_master' % qschema
s = ("SELECT name FROM %s "
"WHERE type='view' ORDER BY name") % (master,)
rs = connection.execute(s)
else:
try:
s = ("SELECT name FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE type='view' ORDER BY name")
rs = connection.execute(s)
except exc.DBAPIError:
s = ("SELECT name FROM sqlite_master "
"WHERE type='view' ORDER BY name")
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = '%s.sqlite_master' % qschema
s = ("SELECT sql FROM %s WHERE name = '%s'"
"AND type='view'") % (master, view_name)
rs = connection.execute(s)
else:
try:
s = ("SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = '%s' "
"AND type='view'") % view_name
rs = connection.execute(s)
except exc.DBAPIError:
s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type='view'") % view_name
rs = connection.execute(s)
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
pragma = "PRAGMA %s." % quote(schema)
else:
pragma = "PRAGMA "
qtable = quote(table_name)
c = _pragma_cursor(
connection.execute("%stable_info(%s)" %
(pragma, qtable)))
rows = c.fetchall()
columns = []
for row in rows:
(name, type_, nullable, default, primary_key) = \
(row[1], row[2].upper(), not row[3],
row[4], row[5])
columns.append(self._get_column_info(name, type_, nullable,
default, primary_key))
return columns
def _get_column_info(self, name, type_, nullable,
default, primary_key):
match = re.match(r'(\w+)(\(.*?\))?', type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = "VARCHAR"
args = ''
try:
coltype = self.ischema_names[coltype]
if args is not None:
args = re.findall(r'(\d+)', args)
coltype = coltype(*[int(a) for a in args])
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(coltype, name))
coltype = sqltypes.NullType()
if default is not None:
default = unicode(default)
return {
'name': name,
'type': coltype,
'nullable': nullable,
'default': default,
'autoincrement': default is None,
'primary_key': primary_key
}
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
cols = self.get_columns(connection, table_name, schema, **kw)
pkeys = []
for col in cols:
if col['primary_key']:
pkeys.append(col['name'])
return pkeys
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
pragma = "PRAGMA %s." % quote(schema)
else:
pragma = "PRAGMA "
qtable = quote(table_name)
c = _pragma_cursor(connection.execute("%sforeign_key_list(%s)" % (pragma, qtable)))
fkeys = []
fks = {}
while True:
row = c.fetchone()
if row is None:
break
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
# sqlite won't return rcol if the table
# was created with REFERENCES <tablename>, no col
if rcol is None:
rcol = lcol
# see http://www.sqlalchemy.org/trac/ticket/2568
# as well as http://www.sqlite.org/src/info/600482d161
if self.dbapi.sqlite_version_info < (3, 6, 14):
rtbl = re.sub(r'^\"|\"$', '', rtbl)
try:
fk = fks[numerical_id]
except KeyError:
fk = {
'name': None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : rtbl,
'referred_columns' : []
}
fkeys.append(fk)
fks[numerical_id] = fk
# look up the table based on the given table's engine, not 'self',
# since it could be a ProxyEngine
if lcol not in fk['constrained_columns']:
fk['constrained_columns'].append(lcol)
if rcol not in fk['referred_columns']:
fk['referred_columns'].append(rcol)
return fkeys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
pragma = "PRAGMA %s." % quote(schema)
else:
pragma = "PRAGMA "
include_auto_indexes = kw.pop('include_auto_indexes', False)
qtable = quote(table_name)
c = _pragma_cursor(connection.execute("%sindex_list(%s)" % (pragma, qtable)))
indexes = []
while True:
row = c.fetchone()
if row is None:
break
# ignore implicit primary key index.
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
elif not include_auto_indexes and row[1].startswith('sqlite_autoindex'):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in indexes:
c = connection.execute("%sindex_info(%s)" % (pragma, quote(idx['name'])))
cols = idx['column_names']
while True:
row = c.fetchone()
if row is None:
break
cols.append(row[2])
return indexes
def _pragma_cursor(cursor):
"""work around SQLite issue whereby cursor.description
is blank when PRAGMA returns no rows."""
if cursor.closed:
cursor.fetchone = lambda: None
cursor.fetchall = lambda: []
return cursor
| apache-2.0 |
alex/raven | docs/conf.py | 6 | 7322 | # -*- coding: utf-8 -*-
#
# Sentry documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 20 16:21:42 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinxtogithub']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Raven'
copyright = u'2010, David Cramer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __import__('pkg_resources').get_distribution('raven').version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Cross-project reference targets for sphinx.ext.intersphinx: each key is
# an inventory location (or a named target); None means fetch the default
# objects.inv from that base URL.
intersphinx_mapping = {
    'http://docs.python.org/2.7': None,
    'django': ('http://docs.djangoproject.com/en/dev/', 'http://docs.djangoproject.com/en/dev/_objects/'),
    'http://raven.readthedocs.org/en/latest': None
}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ravendoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Raven.tex', u'Raven Documentation',
u'David Cramer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'raven', u'Raven Documentation',
[u'David Cramer'], 1)
]
| bsd-3-clause |
junwoo091400/MyCODES | Projects/FootPad_Logger/logged_data_analyzer_LSTM/RNN_LSTM.py | 1 | 2131 |
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import ipdb
def RNN_LSTM(batch_size_in = 5, total_len_in = 30000, pad_len_in = 5, backprop_len_in = 50, state_size_in = 10, num_class_in = 32):
    """Build a truncated-BPTT LSTM classifier graph (TF1 contrib API).

    Returns a tuple of: the X/Y placeholders, the external cell/hidden
    state placeholders, the final state, the per-step softmax prediction
    series, the output projection W2/b2, the LSTM cell, the Adagrad train
    op and the mean cross-entropy loss.
    Note: total_len_in is expected to equal backprop_len_in * num_batches.
    """
    batch_size = batch_size_in
    total_series_length = total_len_in
    pad_length = pad_len_in
    truncated_backprop_length = backprop_len_in
    state_size = state_size_in
    num_classes = num_class_in
    num_batches = total_series_length // truncated_backprop_length

    # Inputs: [batch, time, features] floats and integer labels [batch, time].
    batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length, pad_length])
    batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])

    # LSTM state is fed from outside so it can carry across truncated segments.
    cell_state = tf.placeholder(tf.float32, [batch_size, state_size])
    hidden_state = tf.placeholder(tf.float32, [batch_size, state_size])
    init_state = tf.nn.rnn_cell.LSTMStateTuple(cell_state, hidden_state)

    # Projection from LSTM hidden state to class logits.
    W2 = tf.Variable(np.random.rand(state_size, num_classes), dtype=tf.float32)
    b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)

    # Unstack the time axis into lists of per-step tensors.
    inputs_series = tf.unstack(batchX_placeholder, axis=1)
    labels_series = tf.unstack(batchY_placeholder, axis=1)

    # Statically unrolled LSTM over the truncated window.
    cell = tf.contrib.rnn.BasicLSTMCell(state_size, state_is_tuple=True)
    states_series, current_state = tf.contrib.rnn.static_rnn(cell, inputs_series, init_state)

    logits_series = [tf.matmul(state, W2) + b2 for state in states_series]  # broadcasted addition
    predictions_series = [tf.nn.softmax(logits) for logits in logits_series]

    losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
              for logits, labels in zip(logits_series, labels_series)]
    total_loss = tf.reduce_mean(losses)
    train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)

    return (batchX_placeholder, batchY_placeholder, cell_state, hidden_state,
            current_state, predictions_series, W2, b2, cell, train_step, total_loss)
kelvin13/Knockout | pygments/lexers/hdl.py | 21 | 18699 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
    """
    For verilog source code with preprocessor directives.
    .. versionadded:: 1.4
    """
    name = 'verilog'
    aliases = ['verilog', 'v']
    filenames = ['*.v']
    mimetypes = ['text/x-verilog']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[01]+', Number.Bin),
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_]\w*', Name.Constant),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
             'import'),
            (words((
                'always', 'always_comb', 'always_ff', 'always_latch', 'and',
                'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
                'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
                'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
                'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
                'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
                'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
                'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
                'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
                'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
                'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
                'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
                'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
                'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
                'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
                'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
                'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
             Keyword),
            (words((
                'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
                'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
                'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
                'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
                'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
                'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
                'undef'), prefix=r'`', suffix=r'\b'),
             Comment.Preproc),
            (words((
                'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
                'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
                'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
                'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
                'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
                'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
                'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
                'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
                prefix=r'\$', suffix=r'\b'),
             Name.Builtin),
            # Bug fix: a missing comma previously made 'wo' 'shortreal'
            # concatenate into the bogus keyword 'woshortreal', losing both
            # 'wor' and 'shortreal' from the type list.
            (words((
                'byte', 'shortint', 'int', 'longint', 'integer', 'time',
                'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
                'shortreal', 'real', 'realtime'), suffix=r'\b'),
             Keyword.Type),
            (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
            (r'\$?[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[\w:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
class SystemVerilogLexer(RegexLexer):
    """
    Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
    1800-2009 standard.
    .. versionadded:: 1.5
    """
    name = 'systemverilog'
    aliases = ['systemverilog', 'sv']
    filenames = ['*.sv', '*.svh']
    mimetypes = ['text/x-systemverilog']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[01]+', Number.Bin),
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_]\w*', Name.Constant),
            (words((
                'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch',
                'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins',
                'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez',
                'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint',
                'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign',
                'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase',
                'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate',
                'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive',
                'endprogram', 'endproperty', 'endsequence', 'endspecify', 'endtable',
                'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern',
                'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin',
                'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone',
                'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include',
                'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface',
                'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
                'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium',
                'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled',
                'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter',
                'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected',
                'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent',
                'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime',
                'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
                'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime',
                's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal',
                'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static',
                'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1',
                'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
                'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0',
                'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0',
                'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored',
                'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while',
                'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'),
             Keyword),
            (words((
                '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype',
                '`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif',
                '`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma',
                '`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'),
                suffix=r'\b'),
             Comment.Preproc),
            (words((
                '$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile',
                '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports',
                '$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff',
                '$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb',
                '$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc',
                '$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro',
                '$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh',
                '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo',
                '$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff',
                '$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind',
                '$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo',
                '$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc',
                '$value$plusargs', '$write', '$writeb', '$writeh', '$writememb',
                '$writememh', '$writeo'), suffix=r'\b'),
             Name.Builtin),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # Bug fix: a missing comma previously made 'wo' 'shortreal'
            # concatenate into the bogus keyword 'woshortreal', losing both
            # 'wor' and 'shortreal' from the type list.
            (words((
                'byte', 'shortint', 'int', 'longint', 'integer', 'time',
                'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
                'shortreal', 'real', 'realtime'), suffix=r'\b'),
             Keyword.Type),
            (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
            (r'\$?[a-zA-Z_]\w*', Name),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[\w:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
class VhdlLexer(RegexLexer):
    """
    For VHDL source code.
    .. versionadded:: 1.5
    """
    name = 'vhdl'
    aliases = ['vhdl']
    filenames = ['*.vhdl', '*.vhd']
    mimetypes = ['text/x-vhdl']
    # VHDL is case-insensitive, so all patterns below match ignoring case.
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'--.*?$', Comment.Single),
            (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r"'[a-z_]\w*", Name.Attribute),
            (r'[()\[\],.;\']', Punctuation),
            (r'"[^\n\\"]*"', String),
            (r'(library)(\s+)([a-z_]\w*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
            (r'(use)(\s+)([a-z_][\w.]*\.)(all)',
             bygroups(Keyword, Text, Name.Namespace, Keyword)),
            (r'(use)(\s+)([a-z_][\w.]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(std|ieee)(\.[a-z_]\w*)',
             bygroups(Name.Namespace, Name.Namespace)),
            (words(('std', 'ieee', 'work'), suffix=r'\b'),
             Name.Namespace),
            (r'(entity|component)(\s+)([a-z_]\w*)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
             r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
             bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
                      Name.Class, Text, Keyword)),
            (r'([a-z_]\w*)(:)(\s+)(process|for)',
             bygroups(Name.Class, Operator, Text, Keyword)),
            # 'end <name>;' gets its own state so the trailing identifier is
            # highlighted as the closed design unit's name.
            (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
            include('types'),
            include('keywords'),
            include('numbers'),
            (r'[a-z_]\w*', Name),
        ],
        'endblock': [
            include('keywords'),
            (r'[a-z_]\w*', Name.Class),
            (r'(\s+)', Text),
            (r';', Punctuation, '#pop'),
        ],
        # Predefined standard and std_logic types.
        'types': [
            (words((
                'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
                'delay_length', 'natural', 'positive', 'string', 'bit_vector',
                'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
                'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'),
             Keyword.Type),
        ],
        # Reserved words per the VHDL standard.
        'keywords': [
            (words((
                'abs', 'access', 'after', 'alias', 'all', 'and',
                'architecture', 'array', 'assert', 'attribute', 'begin', 'block',
                'body', 'buffer', 'bus', 'case', 'component', 'configuration',
                'constant', 'disconnect', 'downto', 'else', 'elsif', 'end',
                'entity', 'exit', 'file', 'for', 'function', 'generate',
                'generic', 'group', 'guarded', 'if', 'impure', 'in',
                'inertial', 'inout', 'is', 'label', 'library', 'linkage',
                'literal', 'loop', 'map', 'mod', 'nand', 'new',
                'next', 'nor', 'not', 'null', 'of', 'on',
                'open', 'or', 'others', 'out', 'package', 'port',
                'postponed', 'procedure', 'process', 'pure', 'range', 'record',
                'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select',
                'severity', 'signal', 'shared', 'sla', 'sll', 'sra',
                'srl', 'subtype', 'then', 'to', 'transport', 'type',
                'units', 'until', 'use', 'variable', 'wait', 'when',
                'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
             Keyword),
        ],
        # Based literals (e.g. 16#ff#), plain integers, reals and
        # hex/octal/binary bit-string literals.
        'numbers': [
            (r'\d{1,2}#[0-9a-f_]+#?', Number.Integer),
            (r'\d+', Number.Integer),
            (r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float),
            (r'X"[0-9a-f_]+"', Number.Hex),
            (r'O"[0-7_]+"', Number.Oct),
            (r'B"[01_]+"', Number.Bin),
        ],
    }
| gpl-3.0 |
radekp/qt | doc/src/diagrams/programs/mdiarea.py | 1 | 3712 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## No Commercial Usage
## This file contains pre-release code and may not be distributed.
## You may use this file in accordance with the terms and conditions
## contained in the Technology Preview License Agreement accompanying
## this package.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
##
##
##
##
##
##
##
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QApplication, QColor, QIcon, QLabel, QMdiArea, QPixmap, \
QPushButton, QTableWidget, QTableWidgetItem, QTextEdit
class Changer:
    """Alternates a QMdiArea between cascaded and tiled sub-window layouts."""

    def __init__(self, mdiArea):
        self.mdiArea = mdiArea
        self.state = 0  # 0 -> cascade on next call, 1 -> tile on next call

    def change(self):
        """Apply the next layout, update the title, and flip the state."""
        if self.state == 0:
            self.mdiArea.cascadeSubWindows()
            self.mdiArea.setWindowTitle("Cascade")
        else:
            self.mdiArea.tileSubWindows()
            self.mdiArea.setWindowTitle("Tile")
        self.state = (self.state + 1) % 2
if __name__ == "__main__":
    app = QApplication(sys.argv)

    # Use a fully transparent 16x16 pixmap as the application icon so the
    # demo windows carry no distracting icon.
    pixmap = QPixmap(16, 16)
    pixmap.fill(QColor(0, 0, 0, 0))
    icon = QIcon(pixmap)
    app.setWindowIcon(icon)

    mdiArea = QMdiArea()

    # Sub-window 1: a text editor pre-filled with placeholder text.
    textEdit = QTextEdit()
    textEdit.setPlainText("Qt Quarterly is a paper-based newsletter "
                          "exclusively available to Qt customers. Every "
                          "quarter we mail out an issue that we hope will "
                          "bring added insight and pleasure to your Qt "
                          "programming, with high-quality technical articles "
                          "written by Qt experts.")
    textWindow = mdiArea.addSubWindow(textEdit)
    textWindow.setWindowTitle("A Text Editor")

    # Sub-window 2: a label showing the Qt logo image.
    label = QLabel()
    label.setPixmap(QPixmap("../../images/qt-logo.png"))
    labelWindow = mdiArea.addSubWindow(label)
    labelWindow.setWindowTitle("A Label")

    # Sub-window 3: a two-column name/age table.
    items = (("Henry", 23), ("Bill", 56), ("Susan", 19), ("Jane", 47))
    table = QTableWidget(len(items), 2)
    # enumerate() replaces the previous index-based range(len(items)) loop.
    for row, (name, age) in enumerate(items):
        table.setItem(row, 0, QTableWidgetItem(name))
        table.setItem(row, 1, QTableWidgetItem(str(age)))
    tableWindow = mdiArea.addSubWindow(table)
    tableWindow.setWindowTitle("A Table Widget")

    mdiArea.show()

    # A button whose clicks alternate the MDI area between cascade and tile.
    changer = Changer(mdiArea)
    button = QPushButton("Cascade")
    button.connect(button, SIGNAL("clicked()"), changer.change)
    button.show()

    sys.exit(app.exec_())
| lgpl-2.1 |
titansgroup/python-phonenumbers | python/phonenumbers/data/region_MX.py | 8 | 4158 | """Auto-generated file, do not edit by hand. MX metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Numbering-plan metadata for Mexico (MX, country calling code +52).  Each
# PhoneNumberDesc pairs a full-match regex (national_number_pattern) with a
# cheaper length check (possible_number_pattern); a pattern of 'NA'
# presumably marks a number category that does not exist in this plan —
# confirm against the phonenumbers metadata conventions.
PHONE_METADATA_MX = PhoneMetadata(id='MX', country_code=52, international_prefix='0[09]',
    general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{9,10}', possible_number_pattern='\\d{7,11}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:33|55|81)\\d{8}|(?:2(?:0[01]|2[2-9]|3[1-35-8]|4[13-9]|7[1-689]|8[1-578]|9[467])|3(?:1[1-79]|[2458][1-9]|7[1-8]|9[1-5])|4(?:1[1-57-9]|[24-6][1-9]|[37][1-8]|8[1-35-9]|9[2-689])|5(?:88|9[1-79])|6(?:1[2-68]|[234][1-9]|5[1-3689]|6[12457-9]|7[1-7]|8[67]|9[4-8])|7(?:[13467][1-9]|2[1-8]|5[13-9]|8[1-69]|9[17])|8(?:2[13-689]|3[1-6]|4[124-6]|6[1246-9]|7[1-378]|9[12479])|9(?:1[346-9]|2[1-4]|3[2-46-8]|5[1348]|[69][1-9]|7[12]|8[1-8]))\\d{7}', possible_number_pattern='\\d{7,10}', example_number='2221234567'),
    # Mobile numbers are the fixed-line patterns prefixed with a leading '1'.
    mobile=PhoneNumberDesc(national_number_pattern='1(?:(?:33|55|81)\\d{8}|(?:2(?:2[2-9]|3[1-35-8]|4[13-9]|7[1-689]|8[1-578]|9[467])|3(?:1[1-79]|[2458][1-9]|7[1-8]|9[1-5])|4(?:1[1-57-9]|[24-6][1-9]|[37][1-8]|8[1-35-9]|9[2-689])|5(?:88|9[1-79])|6(?:1[2-68]|[2-4][1-9]|5[1-3689]|6[12457-9]|7[1-7]|8[67]|9[4-8])|7(?:[13467][1-9]|2[1-8]|5[13-9]|8[1-69]|9[17])|8(?:2[13-689]|3[1-6]|4[124-6]|6[1246-9]|7[1-378]|9[12479])|9(?:1[346-9]|2[1-4]|3[2-46-8]|5[1348]|[69][1-9]|7[12]|8[1-8]))\\d{7})', possible_number_pattern='\\d{11}', example_number='12221234567'),
    toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|88)\\d{7}', possible_number_pattern='\\d{10}', example_number='8001234567'),
    premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{7}', possible_number_pattern='\\d{10}', example_number='9001234567'),
    shared_cost=PhoneNumberDesc(national_number_pattern='300\\d{7}', possible_number_pattern='\\d{10}', example_number='3001234567'),
    personal_number=PhoneNumberDesc(national_number_pattern='500\\d{7}', possible_number_pattern='\\d{10}', example_number='5001234567'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    # National dialling prefix and the parsing/transform rules applied to it.
    national_prefix='01',
    national_prefix_for_parsing='0[12]|04[45](\\d{10})',
    national_prefix_transform_rule='1\\1',
    # Formats used when rendering numbers nationally.
    number_format=[NumberFormat(pattern='([358]\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['33|55|81'], national_prefix_formatting_rule='01 \\1', national_prefix_optional_when_formatting=True),
        NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2467]|3[0-2457-9]|5[089]|8[02-9]|9[0-35-9]'], national_prefix_formatting_rule='01 \\1', national_prefix_optional_when_formatting=True),
        NumberFormat(pattern='(1)([358]\\d)(\\d{4})(\\d{4})', format='044 \\2 \\3 \\4', leading_digits_pattern=['1(?:33|55|81)'], national_prefix_formatting_rule='\\1', national_prefix_optional_when_formatting=True),
        NumberFormat(pattern='(1)(\\d{3})(\\d{3})(\\d{4})', format='044 \\2 \\3 \\4', leading_digits_pattern=['1(?:[2467]|3[0-2457-9]|5[089]|8[2-9]|9[1-35-9])'], national_prefix_formatting_rule='\\1', national_prefix_optional_when_formatting=True)],
    # Formats used when rendering numbers for international callers.
    intl_number_format=[NumberFormat(pattern='([358]\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['33|55|81']),
        NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2467]|3[0-2457-9]|5[089]|8[02-9]|9[0-35-9]']),
        NumberFormat(pattern='(1)([358]\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['1(?:33|55|81)']),
        NumberFormat(pattern='(1)(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['1(?:[2467]|3[0-2457-9]|5[089]|8[2-9]|9[1-35-9])'])],
    leading_zero_possible=True,
    mobile_number_portable_region=True)
| apache-2.0 |
VisionSystemsInc/voxel_globe | voxel_globe/channel_test/tasks.py | 2 | 1715 | from voxel_globe.common_tasks import shared_task, VipTask
from voxel_globe.websockets import ws_logger
import random, time
@shared_task(base=VipTask, bind=True)
def success_task(self):
    """Demo task that exercises every non-error websocket log level and
    then finishes successfully.

    Emits debug/info/warn lines plus a user-facing message, walks the task
    through two intermediate Celery states, and returns a result dict that
    becomes the task's SUCCESS metadata.
    """
    # Random payloads make each run's log lines distinguishable from the last.
    ws_logger.debug(self, "d " + str(random.random()))
    ws_logger.info(self, "i " + str(random.random()))
    ws_logger.warn(self, "w " + str(random.random()))
    ws_logger.message(self, "Important message about task %s!!" % self.request.id)
    self.update_state(state='Initializing', meta={"site_name": "Exciting text"})
    self.update_state(state='Processing', meta={"site_name": "Exciting text"})
    return {"site_name": "Exciting text"}
@shared_task(base=VipTask, bind=True)
def fail_task(self):
    """Demo task that logs on the error/fatal websocket channels and then
    fails by raising ValueError, leaving the task in the FAILURE state."""
    ws_logger.error(self, "e " + str(random.random()))
    ws_logger.fatal(self, "f " + str(random.random()))
    raise ValueError("Because reasons")
@shared_task(base=VipTask, bind=True)
def long_task(self):
    """Demo task that runs five 5-second steps with progress reporting.

    Starts at index 0/5, then after each step pushes a websocket message
    ("Important message 1".."Important message 5") and advances the
    PROCESSING state's index, ending at index 5/5.
    """
    total = 5
    self.update_state(state="PROCESSING", meta={"index": 0, "total": total})
    # The original body repeated this sleep/message/update sequence five
    # times verbatim; the loop emits the identical call sequence.
    for step in range(1, total + 1):
        time.sleep(5)
        ws_logger.message(self, "Important message %d" % step)
        self.update_state(state="PROCESSING", meta={"index": step, "total": total})
| mit |
VerifiableRobotics/ReSpeC | test/formula/activation_outcomes_test.py | 2 | 24282 | #!/usr/bin/env python
from respec.formula.activation_outcomes import *
import unittest
class ActionFormulaGenerationTests(unittest.TestCase):
    """Test the generation of Activation-Outcomes action formulas.

    Naming convention exercised throughout (visible in the expected values):
    an action 'dance' gets an activation proposition 'dance_a' and one
    outcome proposition per outcome — 'dance_c' (completed), 'dance_f'
    (failed), 'dance_p' (preempted).
    """

    def setUp(self):
        """Gets called before every test case."""
        self.sys_props = ['dance', 'sleep']
        self.outcomes = ['completed', 'failed', 'preempted']

    def tearDown(self):
        """Gets called after every test case."""
        del self.sys_props
        del self.outcomes

    def test_base_class(self):
        formula = ActivationOutcomesFormula(self.sys_props, self.outcomes)
        # Test whether the obvious things are working as expected
        self.assertItemsEqual(self.outcomes, formula.outcomes)
        self.assertEqual(list(), formula.formulas)
        # Test whether the activation propositions are generated correctly
        expected_act_props = ['sleep_a', 'dance_a']
        self.assertItemsEqual(expected_act_props, formula.sys_props)
        # Test whether the outcome propositions are generated correctly
        expected_out_props = {'dance': ['dance_c', 'dance_f', 'dance_p'],
                              'sleep': ['sleep_c', 'sleep_f', 'sleep_p']}
        self.assertItemsEqual(expected_out_props, formula.outcome_props)
        # Test whether the environment propositions are generated correctly
        expected_env_props = ['dance_c', 'dance_f', 'dance_p',
                              'sleep_c', 'sleep_f', 'sleep_p']
        self.assertItemsEqual(expected_env_props, formula.env_props)

    def test_constructor_raises_exceptions(self):
        # Non-string props, props already carrying an '_a' suffix, empty or
        # ill-typed outcome lists, and outcomes sharing a first letter must
        # all be rejected.
        self.assertRaises(TypeError, ActivationOutcomesFormula, ['dance', 1.0])
        self.assertRaises(ValueError, ActivationOutcomesFormula, ['dance_a'])
        self.assertRaises(ValueError, ActivationOutcomesFormula, ['dance'], [])
        self.assertRaises(TypeError, ActivationOutcomesFormula, ['dance'], [2])
        self.assertRaises(ValueError, ActivationOutcomesFormula, ['dance'],
                          ['completed', 'capitalized', 'called', 'calculated'])

    def test_bad_activation_prop_request_raises_exception(self):
        # Asking for the activation prop of something already suffixed fails.
        from respec.formula.activation_outcomes import _get_act_prop
        self.assertRaises(ValueError, _get_act_prop, 'dance_a')

    def test_outcome_prop_from_activation(self):
        from respec.formula.activation_outcomes import _get_out_prop
        self.assertEqual(_get_out_prop('dance_a', 'failed'), 'dance_f')

    def test_mutex_formula(self):
        # At most one outcome proposition may hold in the next step.
        formula = OutcomeMutexFormula(['dance'],
                                      ['completed', 'failed', 'preempted'])
        expected_formula_c = 'next(dance_c) -> (next(! dance_f) & next(! dance_p))'
        expected_formula_f = 'next(dance_f) -> (next(! dance_c) & next(! dance_p))'
        expected_formula_p = 'next(dance_p) -> (next(! dance_c) & next(! dance_f))'
        expected_formulas = [expected_formula_c, expected_formula_f, expected_formula_p]
        self.assertEqual('env_trans', formula.type)
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_mutex_single_outcome(self):
        # A single outcome cannot conflict with anything: no formulas emitted.
        formula = OutcomeMutexFormula(['dance'], outcomes = ['completed'])
        self.assertItemsEqual(list(), formula.formulas)

    def test_action_deactivation_formula_single_outcome(self):
        # Once an outcome arrives, the activation prop must be switched off.
        formula = PropositionDeactivationFormula(['dance', 'sleep'])
        expected_formula_1 = '(dance_a & next(dance_c)) -> next(! dance_a)'
        expected_formula_2 = '(sleep_a & next(sleep_c)) -> next(! sleep_a)'
        expected_formulas = [expected_formula_1, expected_formula_2]
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_action_deactivation_formula_multiple_outcomes(self):
        formula = PropositionDeactivationFormula(
            sys_props = ['dance', 'sleep'],
            outcomes = ['completed', 'failed'])
        self.assertEqual('sys_trans', formula.type)
        expected_formula_1 = '(dance_a & (next(dance_c) | next(dance_f))) -> next(! dance_a)'
        expected_formula_2 = '(sleep_a & (next(sleep_c) | next(sleep_f))) -> next(! sleep_a)'
        expected_formulas = [expected_formula_1, expected_formula_2]
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_action_outcome_constraints(self):
        # Outcomes may not appear spontaneously without activation.
        formula = ActionOutcomeConstraintsFormula(
            actions = ['dance'],
            outcomes = ['completed', 'failed'])
        self.assertEqual('env_trans', formula.type)
        # expected_formula_1 = '((dance_c | dance_f) & dance_a) -> (next(dance_c) | next(dance_f))'
        expected_formula_2a = '(! dance_c & ! dance_a) -> next(! dance_c)'
        expected_formula_2b = '(! dance_f & ! dance_a) -> next(! dance_f)'
        # expected_formulas = [expected_formula_1,
        #                      expected_formula_2a, expected_formula_2b]
        expected_formulas = [expected_formula_2a, expected_formula_2b]
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_action_outcome_persistence(self):
        # An outcome persists while the action is not (re)activated.
        formula = ActionOutcomePersistenceFormula(
            actions = ['dance'],
            outcomes = ['completed', 'failed'])
        self.assertEqual('env_trans', formula.type)
        expected_formula_1 = '(dance_c & ! dance_a) -> next(dance_c)'
        expected_formula_2 = '(dance_f & ! dance_a) -> next(dance_f)'
        expected_formulas = [expected_formula_1, expected_formula_2]
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_action_fairness_conditions_multiple_outcomes(self):
        formula = ActionFairnessConditionsFormula(
            actions = ['dance'],
            outcomes = ['completed', 'failed'])
        expected_formula_1a = '(dance_a & (next(dance_c) | next(dance_f)))'  # outcome
        # expected_formula_1b = '(! dance_a & (next(! dance_c) & next(! dance_f)))' # deactivation ??
        # expected_formula_1c = '(! dance_a & (next(dance_c) | next(dance_f)))' # persistence
        # expected_formula_1 = '(' + expected_formula_1a + ' | ' + \
        #                      expected_formula_1b + ' | ' + \
        #                      expected_formula_1c + ')'
        # expected_formula_2a = '(dance_a & next(! dance_a))'
        # expected_formula_2b = '(! dance_a & next(dance_a))' # change
        # expected_formula_2 = '(' + expected_formula_2a + ' | ' + \
        #                      expected_formula_2b + ')'
        # expected_formula = '(' + expected_formula_1 + ' | ' + \
        #                    expected_formula_2 + ')'
        expected_formula = '(' + expected_formula_1a + ' | ' + \
                           '! dance_a' + ')'
        self.assertItemsEqual([expected_formula], formula.formulas)

    def test_preconditions_formula(self):
        # If any precondition's completion prop is false, the action may
        # not be activated.
        formula = PreconditionsFormula(action = 'run',
                                       preconditions = ['step', 'walk'])
        expected_formula = '(! step_c | ! walk_c) -> ! run_a'
        self.assertEqual('sys_trans', formula.type)
        self.assertItemsEqual([expected_formula], formula.formulas)
class TSFormulaGenerationTests(unittest.TestCase):
    """Test the generation of Activation-Outcomes 'topology' formulas.

    The transition system ``ts`` maps each region to the regions reachable
    from it in one step (every region can also stay where it is): r1 and r3
    are mutually reachable, r2 is a sink.
    """

    def setUp(self):
        """Gets called before every test case."""
        self.outcomes = ['completed', 'failed']
        self.ts = {'r1': ['r1', 'r2', 'r3'],
                   'r2': ['r2'],
                   'r3': ['r3', 'r1']}

    def tearDown(self):
        """Gets called after every test case."""
        del self.outcomes
        del self.ts

    def test_bad_key_raises_exception(self):
        bad_ts = {100: ['ok_value']}  # bad key, not str
        self.assertRaises(TypeError, ActivationOutcomesFormula,
                          sys_props = [], outcomes = ['completed'], ts = bad_ts)

    def test_bad_value_raises_exception(self):
        bad_ts = {'ok_key': 'bad_value'}  # ok key, but value isn't a list
        self.assertRaises(TypeError, ActivationOutcomesFormula,
                          sys_props = [], outcomes = ['completed'], ts = bad_ts)

    def test_bad_values_raise_exception(self):
        bad_ts = {'ok_key': [100, 200]}  # ok key, bad list elements are not str
        self.assertRaises(TypeError, ActivationOutcomesFormula,
                          sys_props = [], outcomes = ['completed'], ts = bad_ts)

    def test_value_not_in_keys_raises_exception(self):
        # Every destination region must itself be a key of the ts dict.
        bad_ts = {'key_1': ['key_1', 'key_2']}
        self.assertRaises(ValueError, ActivationOutcomesFormula,
                          sys_props = [], outcomes = ['completed'], ts = bad_ts)

    def test_transition_system_conversion(self):
        # Keys become completion props ('_c'), values activation props ('_a').
        formula = ActivationOutcomesFormula(sys_props = [],
                                            outcomes = ['completed'],
                                            ts = self.ts)
        expected_ts = {'r1_c': ['r1_a', 'r2_a', 'r3_a'],
                       'r2_c': ['r2_a'],
                       'r3_c': ['r3_a', 'r1_a']}
        self.assertDictEqual(formula.ts, expected_ts)

    def test_sys_propositions_from_ts(self):
        formula = ActivationOutcomesFormula(sys_props = [],
                                            outcomes = ['completed'],
                                            ts = self.ts)
        expected_sys_props = ['r1_a', 'r2_a', 'r3_a']
        self.assertItemsEqual(actual_seq = formula.sys_props,
                              expected_seq = expected_sys_props)

    def test_env_propositions_from_ts(self):
        formula = ActivationOutcomesFormula(sys_props = [],
                                            outcomes = ['completed'],
                                            ts = self.ts)
        expected_env_props = ['r1_c', 'r2_c', 'r3_c']
        self.assertItemsEqual(actual_seq = formula.env_props,
                              expected_seq = expected_env_props)

    def test_topology_mutex_formula(self):
        # The robot can be in at most one region at a time.
        formula = TopologyMutexFormula(self.ts)
        self.assertEqual('env_trans', formula.type)
        expected_formula_1 = 'next(r1_c) <-> (! next(r2_c) & ! next(r3_c))'
        expected_formula_2 = 'next(r2_c) <-> (! next(r1_c) & ! next(r3_c))'
        expected_formula_3 = 'next(r3_c) <-> (! next(r1_c) & ! next(r2_c))'
        expected_formulas = [expected_formula_1, expected_formula_2, expected_formula_3]
        self.assertItemsEqual(formula.formulas, expected_formulas)

    def test_transition_relation_formula(self):
        # From each region, the system may activate exactly one adjacent
        # region — or activate nothing at all.
        formula = TransitionRelationFormula(self.ts)
        self.assertEqual('sys_trans', formula.type)
        expected_formula_1 = 'next(r1_c) -> (next(r1_a & ! r2_a & ! r3_a) | ' + \
                             'next(r2_a & ! r1_a & ! r3_a) | ' + \
                             'next(r3_a & ! r1_a & ! r2_a) | ' + \
                             'next(! r1_a & ! r2_a & ! r3_a))'
        expected_formula_2 = 'next(r2_c) -> (next(r2_a & ! r1_a & ! r3_a) | ' + \
                             'next(! r1_a & ! r2_a & ! r3_a))'
        expected_formula_3 = 'next(r3_c) -> (next(r3_a & ! r1_a & ! r2_a) | ' + \
                             'next(r1_a & ! r2_a & ! r3_a) | ' + \
                             'next(! r1_a & ! r2_a & ! r3_a))'
        expected_formulas = [expected_formula_1, expected_formula_2, expected_formula_3]
        self.assertItemsEqual(formula.formulas, expected_formulas)

    def test_single_step_change_formula_with_one_outcome(self):
        formula = SingleStepChangeFormula(self.ts)  # 'completed' used by default
        self.assertEqual('env_trans', formula.type)
        expected_formula_1a = '(r1_c & (r1_a & ! r2_a & ! r3_a)) -> next(r1_c)'
        expected_formula_1b = '(r1_c & (r2_a & ! r1_a & ! r3_a)) -> (next(r1_c) | next(r2_c))'
        expected_formula_1c = '(r1_c & (r3_a & ! r1_a & ! r2_a)) -> (next(r3_c) | next(r1_c))'
        expected_formula_2 = '(r2_c & (r2_a & ! r1_a & ! r3_a)) -> next(r2_c)'
        expected_formula_3a = '(r3_c & (r1_a & ! r2_a & ! r3_a)) -> (next(r3_c) | next(r1_c))'
        expected_formula_3b = '(r3_c & (r3_a & ! r1_a & ! r2_a)) -> next(r3_c)'
        expected_formulas = [expected_formula_1a, expected_formula_1b, expected_formula_1c,
                             expected_formula_2, expected_formula_3a, expected_formula_3b]
        self.assertItemsEqual(formula.formulas, expected_formulas)

    def test_single_step_change_formula_with_multiple_outcomes(self):
        # With a 'failed' outcome, each move may also end in the failure
        # proposition of the activated region.
        formula = SingleStepChangeFormula(self.ts, ['completed', 'failed'])
        self.assertEqual('env_trans', formula.type)
        expected_formula_1a = '(r1_c & (r1_a & ! r2_a & ! r3_a)) -> (next(r1_c) | next(r1_f))'
        expected_formula_1b = '(r1_c & (r2_a & ! r1_a & ! r3_a)) -> (next(r1_c) | next(r2_f) | next(r2_c))'
        expected_formula_1c = '(r1_c & (r3_a & ! r1_a & ! r2_a)) -> (next(r3_c) | next(r3_f) | next(r1_c))'
        expected_formula_2 = '(r2_c & (r2_a & ! r1_a & ! r3_a)) -> (next(r2_f) | next(r2_c))'
        expected_formula_3a = '(r3_c & (r1_a & ! r2_a & ! r3_a)) -> (next(r3_c) | next(r1_c) | next(r1_f))'
        expected_formula_3b = '(r3_c & (r3_a & ! r1_a & ! r2_a)) -> (next(r3_c) | next(r3_f))'
        expected_formulas = [expected_formula_1a, expected_formula_1b, expected_formula_1c,
                             expected_formula_2, expected_formula_3a, expected_formula_3b]
        self.assertItemsEqual(formula.formulas, expected_formulas)

    def test_topology_outcome_constraint(self):
        # No outcome prop may turn on without the matching activation.
        formula = TopologyOutcomeConstraintFormula(self.ts, ['completed', 'failed'])
        self.assertEqual('env_trans', formula.type)
        expected_formula_1 = '(! r1_c & ! r1_a) -> next(! r1_c)'
        expected_formula_2 = '(! r2_c & ! r2_a) -> next(! r2_c)'
        expected_formula_3 = '(! r3_c & ! r3_a) -> next(! r3_c)'
        expected_formula_4 = '(! r1_f & ! r1_a) -> next(! r1_f)'
        expected_formula_5 = '(! r2_f & ! r2_a) -> next(! r2_f)'
        expected_formula_6 = '(! r3_f & ! r3_a) -> next(! r3_f)'
        expected_formulas = [expected_formula_1, expected_formula_2, expected_formula_3,
                             expected_formula_4, expected_formula_5, expected_formula_6]
        self.assertItemsEqual(formula.formulas, expected_formulas)

    def test_topology_outcome_persistence(self):
        # When no region is activated, outcome props keep their value.
        formula = TopologyOutcomePersistenceFormula(self.ts, ['completed', 'failed'])
        self.assertEqual('env_trans', formula.type)
        expected_formula_1a = '(r1_c & (! r1_a & ! r2_a & ! r3_a)) -> next(r1_c)'
        expected_formula_1b = '(r1_f & (! r1_a & ! r2_a & ! r3_a)) -> next(r1_f)'
        expected_formula_2a = '(r2_c & (! r1_a & ! r2_a & ! r3_a)) -> next(r2_c)'
        expected_formula_2b = '(r2_f & (! r1_a & ! r2_a & ! r3_a)) -> next(r2_f)'
        expected_formula_3a = '(r3_c & (! r1_a & ! r2_a & ! r3_a)) -> next(r3_c)'
        expected_formula_3b = '(r3_f & (! r1_a & ! r2_a & ! r3_a)) -> next(r3_f)'
        expected_formulas = [expected_formula_1a, expected_formula_1b, expected_formula_2a,
                             expected_formula_2b, expected_formula_3a, expected_formula_3b]
        self.assertItemsEqual(formula.formulas, expected_formulas)

    def test_topology_fairness_conditions_single_outcome(self):
        # Liveness: infinitely often, either an activated move completes or
        # nothing is activated.
        formula = TopologyFairnessConditionsFormula(self.ts)
        self.assertEqual('env_liveness', formula.type)
        expected_formula_1a = '((r1_a & ! r2_a & ! r3_a) & next(r1_c))'
        expected_formula_1b = '((r2_a & ! r1_a & ! r3_a) & next(r2_c))'
        expected_formula_1c = '((r3_a & ! r1_a & ! r2_a) & next(r3_c))'
        expected_formula_1 = '(' + expected_formula_1a + ' | ' + \
                             expected_formula_1b + ' | ' + \
                             expected_formula_1c + ')'  # completion
        # expected_formula_2a = '((r1_a & ! r2_a & ! r3_a) & ! next(r1_a & ! r2_a & ! r3_a))'
        # expected_formula_2b = '((r2_a & ! r1_a & ! r3_a) & ! next(r2_a & ! r1_a & ! r3_a))'
        # expected_formula_2c = '((r3_a & ! r1_a & ! r2_a) & ! next(r3_a & ! r1_a & ! r2_a))'
        # expected_formula_2 = '(' + expected_formula_2a + ' | ' + \
        #                      expected_formula_2b + ' | ' + \
        #                      expected_formula_2c + ')'  # change
        expected_formula_3 = '(! r1_a & ! r2_a & ! r3_a)'  # activate nothing
        # expected_formula = '(' + expected_formula_1 + ' | ' + \
        #                    expected_formula_2 + ' | ' + \
        #                    expected_formula_3 + ')'
        expected_formula = '(' + expected_formula_1 + ' | ' + \
                           expected_formula_3 + ')'
        self.assertItemsEqual([expected_formula], formula.formulas)

    def test_topology_fairness_conditions_with_outcomes(self):
        # self.fail('Incomplete test!')
        formula = TopologyFairnessConditionsFormula(
            ts = self.ts,
            outcomes = ['completed', 'failed'])
        self.assertEqual('env_liveness', formula.type)
        expected_formula_1a = '((r1_a & ! r2_a & ! r3_a) & (next(r1_c) | next(r1_f)))'
        expected_formula_1b = '((r2_a & ! r1_a & ! r3_a) & (next(r2_c) | next(r2_f)))'
        expected_formula_1c = '((r3_a & ! r1_a & ! r2_a) & (next(r3_c) | next(r3_f)))'
        expected_formula_1 = '(' + expected_formula_1a + ' | ' + \
                             expected_formula_1b + ' | ' + \
                             expected_formula_1c + ')'  # completion
        # expected_formula_2a = '((r1_a & ! r2_a & ! r3_a) & ! next(r1_a & ! r2_a & ! r3_a))'
        # expected_formula_2b = '((r2_a & ! r1_a & ! r3_a) & ! next(r2_a & ! r1_a & ! r3_a))'
        # expected_formula_2c = '((r3_a & ! r1_a & ! r2_a) & ! next(r3_a & ! r1_a & ! r2_a))'
        # expected_formula_2 = '(' + expected_formula_2a + ' | ' + \
        #                      expected_formula_2b + ' | ' + \
        #                      expected_formula_2c + ')'  # change (same)
        expected_formula_3 = '(! r1_a & ! r2_a & ! r3_a)'  # activate nothing
        # expected_formula = '(' + expected_formula_1 + ' | ' + \
        #                    expected_formula_2 + ' | ' + \
        #                    expected_formula_3 + ')'
        expected_formula = '(' + expected_formula_1 + ' | ' + \
                           expected_formula_3 + ')'
        self.assertItemsEqual([expected_formula], formula.formulas)
class GoalFormulaGenerationTests(unittest.TestCase):
    """Test the generation of Activation-Outcomes liveness requirements."""

    def setUp(self):
        """Gets called before every test case."""
        self.sys_props = ['dance', 'sleep', 'swim']

    def tearDown(self):
        """Gets called after every test case."""
        del self.sys_props

    def test_successful_outcome_formula(self):
        # Success is memorized per-condition via '_m' props; the success
        # prop holds exactly when all memory props do.
        formula = SuccessfulOutcomeFormula(conditions = ['dance', 'sleep'],
                                           success = 'finished')
        self.assertEqual('sys_trans', formula.type)
        expected_sys_props = ['finished', 'dance_a', 'dance_m', 'sleep_a', 'sleep_m']
        self.assertItemsEqual(expected_sys_props, formula.sys_props)
        self.assertItemsEqual(['dance_c', 'sleep_c'], formula.env_props)
        expected_formula_0 = 'finished <-> (dance_m & sleep_m)'
        expected_formula_1 = '((dance_a & next(dance_c)) | dance_m) <-> next(dance_m)'
        expected_formula_2 = '((sleep_a & next(sleep_c)) | sleep_m) <-> next(sleep_m)'
        expected_formulas = [expected_formula_0, expected_formula_1,
                             expected_formula_2]
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_strict_goal_ordering(self):
        # With strict_order, each goal's memory may only be set after the
        # previous goal's memory is already set.
        formula = SuccessfulOutcomeFormula(conditions = ['dance', 'sleep', 'swim'],
                                           success = 'finished', strict_order = True)
        expected_formula_1 = '((dance_a & next(dance_c)) | dance_m) <-> next(dance_m)'
        expected_formula_2 = '(((sleep_a & next(sleep_c)) & dance_m) | sleep_m) <-> next(sleep_m)'
        expected_formula_3 = '(((swim_a & next(swim_c)) & sleep_m) | swim_m) <-> next(swim_m)'
        self.assertIn(expected_formula_1, formula.formulas)
        self.assertIn(expected_formula_2, formula.formulas)
        # Bug fix: expected_formula_3 was constructed but never asserted,
        # leaving the third (swim-after-sleep) ordering constraint unchecked.
        self.assertIn(expected_formula_3, formula.formulas)

    def test_failed_outcome_formula(self):
        # Failure latches: once any condition fails, 'failed' stays on.
        formula = FailedOutcomeFormula(conditions = ['dance', 'sleep'],
                                       failure = 'failed')
        self.assertEqual('sys_trans', formula.type)
        expected_sys_props = ['failed', 'dance_a', 'sleep_a']
        self.assertItemsEqual(expected_sys_props, formula.sys_props)
        expected_env_props = ['dance_f', 'sleep_f']
        self.assertItemsEqual(expected_env_props, formula.env_props)
        expected_formula = 'next(failed) <-> ((next(dance_f) | next(sleep_f)) | failed)'
        self.assertItemsEqual([expected_formula], formula.formulas)

    def test_simple_liveness_generated_with_completion_props(self):
        formula = SimpleLivenessRequirementActOutFormula(goal = 'dance',
                                                         sm_outcome = 'failed')
        self.assertEqual('sys_liveness', formula.type)
        expected_formulas = ['((dance_a & next(dance_c)) | failed)']
        self.assertItemsEqual(expected_formulas, formula.formulas)

    def test_system_liveness_retry_after_failure(self):
        # A failed action must eventually be re-activated.
        formula = RetryAfterFailureFormula(failures = ['dance', 'sleep'])
        self.assertEqual('sys_liveness', formula.type)
        expected_formulas = ['dance_f -> dance_a', 'sleep_f -> sleep_a']
        self.assertItemsEqual(expected_formulas, formula.formulas)
class ICFormulaGenerationTests(unittest.TestCase):
    """Test the generation of Activation-Outcomes initial condition formulas"""

    def setUp(self):
        """Gets called before every test case."""
        self.sys_props = ['dance', 'sleep', 'swim']
        # NOTE(review): neither self.ts nor self.sys_props is referenced by
        # the tests in this class — both appear to be leftover fixtures.
        self.ts = {'r1': ['r1', 'r2', 'r3'],
                   'r2': ['r2'],
                   'r3': ['r3', 'r1']}

    def tearDown(self):
        """Gets called after every test case."""
        del self.sys_props, self.ts

    def test_sys_init_from_true_actions(self):
        # Activation props of initially-true actions are asserted; the rest
        # are negated.
        sys_props = ['dance_a', 'sleep_a', 'swim_a']
        true_props = ['dance', 'swim']
        formula = SystemInitialConditions(sys_props, true_props)
        self.assertEqual('sys_init', formula.type)
        self.assertItemsEqual(sys_props, formula.sys_props)
        self.assertItemsEqual(list(), formula.env_props)
        expected_formula = ['dance_a', 'swim_a', '! sleep_a']
        self.assertItemsEqual(expected_formula, formula.formulas)

    def test_env_init_from_true_actions(self):
        # Initially-true actions start completed ('_c') and not failed
        # ('! _f'); all other outcome props start negated.
        env_props = ['dance_c', 'sleep_c', 'swim_c',
                     'dance_f', 'sleep_f', 'swim_f']
        true_props = ['dance', 'swim']
        formula = EnvironmentInitialConditions(env_props, true_props)
        self.assertEqual('env_init', formula.type)
        self.assertItemsEqual(list(), formula.sys_props)
        self.assertItemsEqual(env_props, formula.env_props)
        expected_formula = ['dance_c', '! dance_f', 'swim_c', '! swim_f',
                            '! sleep_c', '! sleep_f']
        self.assertItemsEqual(expected_formula, formula.formulas)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
    # Run all tests in this module via the standard unittest runner.
    unittest.main()
| bsd-3-clause |
chrrrles/ansible | lib/ansible/utils/module_docs_fragments/cloudstack.py | 125 | 2515 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard cloudstack documentation fragment, shared by CloudStack
    # modules via `extends_documentation_fragment`.
    DOCUMENTATION = '''
options:
  api_key:
    description:
      - API key of the CloudStack API.
    required: false
    default: null
  api_secret:
    description:
      - Secret key of the CloudStack API.
    required: false
    default: null
  api_url:
    description:
      - URL of the CloudStack API e.g. https://cloud.example.com/client/api.
    required: false
    default: null
  api_http_method:
    description:
      - HTTP method used.
    required: false
    default: 'get'
    choices: [ 'get', 'post' ]
  api_timeout:
    description:
      - HTTP timeout.
    required: false
    default: 10
  api_region:
    description:
      - Name of the ini section in the C(cloudstack.ini) file.
    required: false
    default: cloudstack
requirements:
  - "python >= 2.6"
  - "cs >= 0.6.10"
notes:
  - Ansible uses the C(cs) library's configuration method if credentials are not
    provided by the arguments C(api_url), C(api_key), C(api_secret).
    Configuration is read from several locations, in the following order.
  - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and
    C(CLOUDSTACK_METHOD). C(CLOUDSTACK_TIMEOUT) environment variables.
  - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
  - A C(cloudstack.ini) file in the current working directory.
  - A C(.cloudstack.ini) file in the users home directory.
    Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
    Use the argument C(api_region) to select the section name, default section is C(cloudstack).
    See https://github.com/exoscale/cs for more information.
  - This module supports check mode.
'''
| gpl-3.0 |
MziRintu/kitsune | kitsune/wiki/migrations/0002_auto_20150430_1304.py | 20 | 6062 | # -*- coding: utf-8 -*-
"""
Update list of locale choices in the `document.locale` and `locale.locale` fields.
"""
from __future__ import unicode_literals
from django.db import models, migrations
import kitsune.sumo.models
class Migration(migrations.Migration):

    dependencies = [
        ('wiki', '0001_initial'),
    ]

    operations = [
        # Declare the Document model's default ordering and its custom
        # model-level permissions.
        migrations.AlterModelOptions(
            name='document',
            options={'ordering': ['display_order', 'id'], 'permissions': [('archive_document', 'Can archive document'), ('edit_needs_change', 'Can edit needs_change')]},
        ),
        # Rewrite Document.locale with the refreshed locale choice list.
        # The choices tuples below are generated data — do not hand-edit.
        migrations.AlterField(
            model_name='document',
            name='locale',
            field=kitsune.sumo.models.LocaleField(default=b'en-US', max_length=7, db_index=True, choices=[(b'af', 'Afrikaans'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'az', 'Az\u0259rbaycanca'), (b'bg', '\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'bs', 'Bosanski'), (b'ca', 'catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'da', 'Dansk'), (b'de', 'Deutsch'), (b'ee', '\xc8\u028begbe'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'en-US', 'English'), (b'es', 'Espa\xf1ol'), (b'et', 'eesti keel'), (b'eu', 'Euskara'), (b'fa', '\u0641\u0627\u0631\u0633\u06cc'), (b'fi', 'suomi'), (b'fr', 'Fran\xe7ais'), (b'fy-NL', 'Frysk'), (b'ga-IE', 'Gaeilge (\xc9ire)'), (b'gl', 'Galego'), (b'gu-IN', '\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0'), (b'ha', '\u0647\u064e\u0631\u0652\u0634\u064e\u0646 \u0647\u064e\u0648\u0652\u0633\u064e'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hi-IN', '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)'), (b'hr', 'Hrvatski'), (b'hu', 'Magyar'), (b'id', 'Bahasa Indonesia'), (b'ig', 'As\u1ee5s\u1ee5 Igbo'), (b'it', 'Italiano'), (b'ja', '\u65e5\u672c\u8a9e'), (b'km', '\u1781\u17d2\u1798\u17c2\u179a'), (b'kn', '\u0c95\u0ca8\u0ccd\u0ca8\u0ca1'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ln', 'Ling\xe1la'), (b'lt', 'lietuvi\u0173 kalba'), (b'ml', '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'), (b'ne-NP', '\u0928\u0947\u092a\u093e\u0932\u0940'), (b'nl', 'Nederlands'), (b'no', 'Norsk'), (b'pl', 'Polski'), (b'pt-BR', 'Portugu\xeas (do Brasil)'), (b'pt-PT', 'Portugu\xeas (Europeu)'), (b'ro', 'rom\xe2n\u0103'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'si', '\u0dc3\u0dd2\u0d82\u0dc4\u0dbd'), (b'sk', 'sloven\u010dina'), (b'sl', 'sloven\u0161\u010dina'), (b'sq', 'Shqip'), (b'sr-Cyrl', '\u0421\u0440\u043f\u0441\u043a\u0438'), (b'sw', 'Kiswahili'), (b'sv', 'Svenska'), (b'ta', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'), (b'ta-LK', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd (\u0b87\u0bb2\u0b99\u0bcd\u0b95\u0bc8)'), (b'te', '\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41'), (b'th', '\u0e44\u0e17\u0e22'), (b'tr', 'T\xfcrk\xe7e'), (b'uk', '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'), (b'ur', '\u0627\u064f\u0631\u062f\u0648'), (b'vi', 'Ti\u1ebfng Vi\u1ec7t'), (b'wo', 'Wolof'), (b'xh', 'isiXhosa'), (b'yo', '\xe8d\xe8 Yor\xf9b\xe1'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'zu', 'isiZulu')]),
            preserve_default=True,
        ),
        # Rewrite Locale.locale with the same refreshed choice list.
        migrations.AlterField(
            model_name='locale',
            name='locale',
            field=kitsune.sumo.models.LocaleField(default=b'en-US', max_length=7, db_index=True, choices=[(b'af', 'Afrikaans'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'az', 'Az\u0259rbaycanca'), (b'bg', '\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'bs', 'Bosanski'), (b'ca', 'catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'da', 'Dansk'), (b'de', 'Deutsch'), (b'ee', '\xc8\u028begbe'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'en-US', 'English'), (b'es', 'Espa\xf1ol'), (b'et', 'eesti keel'), (b'eu', 'Euskara'), (b'fa', '\u0641\u0627\u0631\u0633\u06cc'), (b'fi', 'suomi'), (b'fr', 'Fran\xe7ais'), (b'fy-NL', 'Frysk'), (b'ga-IE', 'Gaeilge (\xc9ire)'), (b'gl', 'Galego'), (b'gu-IN', '\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0'), (b'ha', '\u0647\u064e\u0631\u0652\u0634\u064e\u0646 \u0647\u064e\u0648\u0652\u0633\u064e'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hi-IN', '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)'), (b'hr', 'Hrvatski'), (b'hu', 'Magyar'), (b'id', 'Bahasa Indonesia'), (b'ig', 'As\u1ee5s\u1ee5 Igbo'), (b'it', 'Italiano'), (b'ja', '\u65e5\u672c\u8a9e'), (b'km', '\u1781\u17d2\u1798\u17c2\u179a'), (b'kn', '\u0c95\u0ca8\u0ccd\u0ca8\u0ca1'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ln', 'Ling\xe1la'), (b'lt', 'lietuvi\u0173 kalba'), (b'ml', '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'), (b'ne-NP', '\u0928\u0947\u092a\u093e\u0932\u0940'), (b'nl', 'Nederlands'), (b'no', 'Norsk'), (b'pl', 'Polski'), (b'pt-BR', 'Portugu\xeas (do Brasil)'), (b'pt-PT', 'Portugu\xeas (Europeu)'), (b'ro', 'rom\xe2n\u0103'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'si', '\u0dc3\u0dd2\u0d82\u0dc4\u0dbd'), (b'sk', 'sloven\u010dina'), (b'sl', 'sloven\u0161\u010dina'), (b'sq', 'Shqip'), (b'sr-Cyrl', '\u0421\u0440\u043f\u0441\u043a\u0438'), (b'sw', 'Kiswahili'), (b'sv', 'Svenska'), (b'ta', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'), (b'ta-LK', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd (\u0b87\u0bb2\u0b99\u0bcd\u0b95\u0bc8)'), (b'te', '\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41'), (b'th', '\u0e44\u0e17\u0e22'), (b'tr', 'T\xfcrk\xe7e'), (b'uk', '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'), (b'ur', '\u0627\u064f\u0631\u062f\u0648'), (b'vi', 'Ti\u1ebfng Vi\u1ec7t'), (b'wo', 'Wolof'), (b'xh', 'isiXhosa'), (b'yo', '\xe8d\xe8 Yor\xf9b\xe1'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'zu', 'isiZulu')]),
            preserve_default=True,
        ),
    ]
| bsd-3-clause |
dharness/pebblePunch | quarry/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
	"""Return the Unicode string for *codePoint* (Python 2).

	BMP code points become a single code unit; astral code points are
	encoded as a UTF-16 surrogate pair; anything out of range yields
	the literal string 'Error'.
	"""
	if 0x0000 <= codePoint <= 0xFFFF:
		return unichr(codePoint)
	if 0x010000 <= codePoint <= 0x10FFFF:
		# Surrogate-pair formula: split the offset from U+10000 into
		# 10-bit halves and bias them into the surrogate ranges.
		offset = codePoint - 0x10000
		highSurrogate = offset // 0x400 + 0xD800
		lowSurrogate = offset % 0x400 + 0xDC00
		return unichr(highSurrogate) + unichr(lowSurrogate)
	return 'Error'
def hexify(codePoint):
	"""Format *codePoint* in zero-padded 'U+XXXXXX' notation."""
	return 'U+{0:06X}'.format(codePoint)
def writeFile(filename, contents):
	# Log the target filename so generation progress is visible.
	print filename
	# Normalise to exactly one trailing newline.
	with open(filename, 'w') as f:
		f.write(contents.strip() + '\n')
# Build one record per Unicode code point: its number, the decoded symbol,
# and the UTF-8 encoding viewed byte-for-byte as latin1 characters.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
	symbol = unisymbol(codePoint)
	# http://stackoverflow.com/a/17199950/96656
	# latin1 maps each byte 0-255 to the same code point, so this keeps
	# every UTF-8 byte visible as one character in the JSON output.
	# NOTE(review): the name ``bytes`` shadows the builtin.
	bytes = symbol.encode('utf8').decode('latin1')
	data.append({
		'codePoint': codePoint,
		'decoded': symbol,
		'encoded': bytes
	});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
	r'\\u([a-fA-F0-9]{4})',
	lambda match: r'\u{}'.format(match.group(1).upper()),
	jsonData
)
writeFile('data.json', jsonData)
| mit |
EndlessDex/euler | 13-largeSum.py | 1 | 5187 | numStr = ('''37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690''')
# Parse each 50-digit line of numStr into an int and print their total
# (Project Euler 13 wants the first ten digits of this sum).
numArr = list(map(int, numStr.split('\n')))
print(sum(numArr))
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/idlelib/Percolator.py | 69 | 2600 | from WidgetRedirector import WidgetRedirector
from Delegator import Delegator
class Percolator:
    """Chain of Delegator filters between a Tk Text widget and its commands.

    The widget's "insert"/"delete" commands are redirected through a stack
    of Delegator filters: ``self.top`` is the outermost filter, and
    ``self.bottom`` is the innermost delegate that talks to the real widget.
    """

    def __init__(self, text):
        # XXX would be nice to inherit from Delegator
        self.text = text
        self.redir = WidgetRedirector(text)
        self.top = self.bottom = Delegator(text)
        self.bottom.insert = self.redir.register("insert", self.insert)
        self.bottom.delete = self.redir.register("delete", self.delete)
        self.filters = []

    def close(self):
        # Unwind the filter stack, then drop all references so the
        # redirector and widget can be released.
        while self.top is not self.bottom:
            self.removefilter(self.top)
        self.top = None
        self.bottom.setdelegate(None); self.bottom = None
        self.redir.close(); self.redir = None
        self.text = None

    def insert(self, index, chars, tags=None):
        # Could go away if inheriting from Delegator
        self.top.insert(index, chars, tags)

    def delete(self, index1, index2=None):
        # Could go away if inheriting from Delegator
        self.top.delete(index1, index2)

    def insertfilter(self, filter):
        # Perhaps rename to pushfilter()?
        # Push *filter* on top of the chain; it must not yet be linked.
        assert isinstance(filter, Delegator)
        assert filter.delegate is None
        filter.setdelegate(self.top)
        self.top = filter

    def removefilter(self, filter):
        # XXX Perhaps should only support popfilter()?
        assert isinstance(filter, Delegator)
        assert filter.delegate is not None
        f = self.top
        if f is filter:
            # Removing the outermost filter: just unlink it.
            self.top = filter.delegate
            filter.setdelegate(None)
        else:
            # Walk down the chain (invalidating caches along the way)
            # until we find the filter whose delegate is being removed,
            # then splice it out.
            while f.delegate is not filter:
                assert f is not self.bottom
                f.resetcache()
                f = f.delegate
            f.setdelegate(filter.delegate)
            filter.setdelegate(None)
def main():
    # Manual smoke test: wire two tracing filters into a Tk Text widget and
    # exercise insertfilter/removefilter in several combinations.  Close
    # the window to advance from one mainloop stage to the next.
    class Tracer(Delegator):
        def __init__(self, name):
            self.name = name
            Delegator.__init__(self, None)
        def insert(self, *args):
            print self.name, ": insert", args
            self.delegate.insert(*args)
        def delete(self, *args):
            print self.name, ": delete", args
            self.delegate.delete(*args)
    root = Tk()
    root.wm_protocol("WM_DELETE_WINDOW", root.quit)
    text = Text()
    text.pack()
    text.focus_set()
    p = Percolator(text)
    t1 = Tracer("t1")
    t2 = Tracer("t2")
    # Stage 1: both tracers active (t2 outermost, then t1).
    p.insertfilter(t1)
    p.insertfilter(t2)
    root.mainloop()
    # Stage 2: only t1 remains.
    p.removefilter(t2)
    root.mainloop()
    # Stage 3: only t2 remains.
    p.insertfilter(t2)
    p.removefilter(t1)
    root.mainloop()
if __name__ == "__main__":
    # Tkinter is only needed for the manual test above, so the wildcard
    # import is deferred to direct execution.
    from Tkinter import *
    main()
| mit |
hbzhang/rsstack | bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
# Marker file: its presence records that a previous download failed and the
# cache tarball therefore needs to be (re)uploaded.
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024

# Fail fast if the destination bucket is not configured.
try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    """Return the cached tarball's size as a human-readable string, e.g. "5 MiB".

    Fix: the original named the local ``kib``, but the division is by
    BYTES_PER_MB, so the unit really is MiB (as the format string says).
    """
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Pack *directory* into <basename>.tar.gz in the current directory."""
    print("Creating tarball of {}...".format(directory))
    tarball = _tarball_filename_for(directory)
    parent = dirname(directory)
    # -C makes tar archive the directory relative to its parent, so the
    # tarball contains a single top-level folder.
    run(['tar', '-czf', tarball, '-C', parent, basename(directory)])
def _extract_tarball(directory):
    """Unpack the cached tarball for *directory* into its parent directory."""
    print("Extracting tarball of {}...".format(directory))
    tarball = _tarball_filename_for(directory)
    run(['tar', '-xzf', tarball, '-C', dirname(directory)])
def download(directory):
    """Fetch the cached tarball for *directory* from S3 and unpack it.

    Relies on the module-level ``key`` and ``friendly_name`` assigned in
    ``__main__``.  On failure, drops NEED_TO_UPLOAD_MARKER so a later
    ``upload`` run knows the cache must be (re)populated, then aborts.
    """
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    try:
        print("Downloading {} tarball from S3...".format(friendly_name))
        key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        # Touch the marker so the next 'upload' invocation repopulates S3.
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(err)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
    """Pack *directory* into a tarball and push it to S3 under ``key``.

    Relies on the module-level ``key`` and ``friendly_name`` assigned in
    ``__main__``.
    """
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
    key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(friendly_name))
    # The cache is fresh again, so the retry marker is no longer needed.
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
    # Uses environment variables:
    # AWS_ACCESS_KEY_ID -- AWS Access Key ID
    # AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 4:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
    mode, friendly_name, dependencies_file, directory = argv

    conn = S3Connection()
    bucket = conn.lookup(BUCKET_NAME, validate=False)
    if bucket is None:
        raise SystemExit("Could not access bucket!")

    # The S3 key is the hash of the dependencies file, so any change to the
    # dependencies automatically invalidates the cached tarball.
    dependencies_file_hash = _sha256_of_file(dependencies_file)

    key = Key(bucket, dependencies_file_hash)
    key.storage_class = 'REDUCED_REDUNDANCY'

    if mode == 'download':
        download(directory)
    elif mode == 'upload':
        # Only upload when a previous download failed (marker present).
        if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
            upload(directory)
        else:
            print("No need to upload anything.")
    else:
        raise SystemExit("Unrecognized mode {!r}".format(mode))
ShiYw/Sigil | 3rdparty/python/Lib/test/test_select.py | 84 | 2742 | import errno
import os
import select
import sys
import unittest
from test import support
@unittest.skipIf((sys.platform[:3]=='win'),
                 "can't easily test on this system")
class SelectTestCase(unittest.TestCase):
    """Tests for select.select(): argument validation, errno reporting,
    returned-list identity, polling a shell pipe, and robustness when a
    fileno() callback mutates the list being selected on."""

    class Nope:
        # No fileno() at all -> select() must raise TypeError.
        pass

    class Almost:
        # fileno() exists but returns a non-integer -> TypeError.
        def fileno(self):
            return 'fileno'

    def test_error_conditions(self):
        self.assertRaises(TypeError, select.select, 1, 2, 3)
        self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
        self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
        self.assertRaises(TypeError, select.select, [], [], [], "not a number")
        self.assertRaises(ValueError, select.select, [], [], [], -1)

    # Issue #12367: http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/155606
    @unittest.skipIf(sys.platform.startswith('freebsd'),
                     'skip because of a FreeBSD bug: kern/155606')
    def test_errno(self):
        # Selecting on an already-closed fd must fail with EBADF.
        with open(__file__, 'rb') as fp:
            fd = fp.fileno()
            fp.close()
            try:
                select.select([fd], [], [], 0)
            except OSError as err:
                self.assertEqual(err.errno, errno.EBADF)
            else:
                self.fail("exception not raised")

    def test_returned_list_identity(self):
        # See issue #8329
        # The three returned lists must be distinct objects.
        r, w, x = select.select([], [], [], 1)
        self.assertIsNot(r, w)
        self.assertIsNot(r, x)
        self.assertIsNot(w, x)

    def test_select(self):
        # Poll a shell pipe that prints one line per second until EOF.
        cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
        p = os.popen(cmd, 'r')
        for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
            if support.verbose:
                print('timeout =', tout)
            rfd, wfd, xfd = select.select([p], [], [], tout)
            if (rfd, wfd, xfd) == ([], [], []):
                continue
            if (rfd, wfd, xfd) == ([p], [], []):
                line = p.readline()
                if support.verbose:
                    print(repr(line))
                if not line:
                    if support.verbose:
                        print('EOF')
                    break
                continue
            # NOTE(review): TestCase.fail() takes a single message argument;
            # if this branch were ever reached it would raise TypeError
            # rather than a clean failure — confirm upstream.
            self.fail('Unexpected return values from select():', rfd, wfd, xfd)
        p.close()

    # Issue 16230: Crash on select resized list
    def test_select_mutated(self):
        # Each fileno() call deletes one element; select() must tolerate the
        # list shrinking under it and report only the surviving entries.
        a = []
        class F:
            def fileno(self):
                del a[-1]
                return sys.__stdout__.fileno()
        a[:] = [F()] * 10
        self.assertEqual(select.select([], a, []), ([], a[:5], []))
def test_main():
    # Run the select() test case, then reap any shell children that
    # test_select may have left behind.
    support.run_unittest(SelectTestCase)
    support.reap_children()
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
RicherMans/QPy | src/qpy.py | 1 | 8949 | import marshal
import subprocess as sub
import os
from inspect import getsource, getargspec
import re
import types
from functools import wraps
from contextlib import contextmanager
import sys
import qsubsettings
from glob import glob
def _globalimports(func):
for name, val in func.__globals__.iteritems():
if isinstance(val, types.ModuleType) and not name.startswith('__'):
yield val.__name__
def _globalaliasimports(func):
for name, modtype in func.func_globals.items():
if isinstance(modtype, types.ModuleType) and not name.startswith('__'):
yield name
# Procedure needs to be executed in the main file, since the locals are only visible from
# here. We use the localmodules as the real name in the produced python scripts for execution
# e.g. the global imports will be set as: import GLOBAL as LOCAL
# localmodules = [key for key in locals().keys()
# if isinstance(locals()[key], type(sys)) and not key.startswith('__')]
# importedmodules = zip(list(_globalimports()), localmodules)
@contextmanager
def stdout_redirected(to=os.devnull):
    '''
    Temporarily redirect stdout at the file-descriptor level.

    import os
    with stdout_redirected(to=filename):
        print("from Python")
        os.system("echo non-Python applications are also supported")
    '''
    # Working on fd 1 via os.dup2 (rather than swapping sys.stdout) means
    # output from child processes and C code is redirected too.
    fd = sys.stdout.fileno()
    # assert that Python and C stdio write using the same file descriptor
    ####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1

    def _redirect_stdout(to):
        sys.stdout.close()  # + implicit flush()
        os.dup2(to.fileno(), fd)  # fd writes to 'to' file
        sys.stdout = os.fdopen(fd, 'w')  # Python writes to fd

    # Keep a duplicate of the original fd so it can be restored afterwards.
    with os.fdopen(os.dup(fd), 'w') as old_stdout:
        with open(to, 'w') as file:
            _redirect_stdout(to=file)
        try:
            yield  # allow code to be run with the redirected stdout
        finally:
            _redirect_stdout(to=old_stdout)  # restore stdout.
            # buffering and flags such as
            # CLOEXEC may be different
def _getQsubCmd(settings):
    # Translate a settings dict into the qsub command-line argument list.
    return qsubsettings._parseSettings(settings)
def runcluster(numjobs, settings=qsubsettings.smallclustersetting):
    '''
    The main function of this module. Decorator which helps running a function in parallel
    on the gridengine cluster

    numjobs : The amount of Jobs which will be run for this function
    settings : A dict which contains all qsub commands and parameters.
    Can be extended at will, whereas the keys of this dict are the arguments of qsub e.g.
    {
    -o:outputfile,
    -P:gpu.p,
    ....
    }

    Usage :
    @runcluster(5)
    def add(a,b):
        return a+b
    runs the function add(a,b) on the cluster with 5 spawning Jobs
    '''
    def decorator(func):
        @wraps(func)
        def wrap(*args, **kw):
            # A per-call 'settings' keyword (e.g. injected by a companion
            # decorator) overrides the decorator-level default.
            #
            # Bugfix: the original did ``settings = kw['settings']`` inside
            # a bare try/except.  That assignment made ``settings`` local
            # to ``wrap``, so any call WITHOUT the keyword crashed with
            # UnboundLocalError at the _getQsubCmd() call below.  Reading
            # into a distinct local keeps the closure variable intact.
            cfg = kw.get('settings', settings)
            qsubcmd = _getQsubCmd(cfg)
            return _run_jobs(qsubcmd, numjobs, func, zip(*args))
        return wrap
    return decorator
# class SingleFileModuleFinder(modulefinder.ModuleFinder):
# def import_hook(self, name, caller, *arg, **kwarg):
# if caller.__file__ == self.name:
# Only call the parent at the top level.
# return modulefinder.ModuleFinder.import_hook(self, name, caller, *arg,
# **kwarg)
# def __call__(self, node):
# self.name = str(node)
# self.run_script(self.name)
def _getModuleImports(func):
    '''
    Collect the module imports visible from *func* as (fullname, alias)
    tuples, e.g. for ``import marshal as mar`` the result contains
    ``('marshal', 'mar')``.
    '''
    fullnames = list(_globalimports(func))
    aliases = list(_globalaliasimports(func))
    # NOTE: pairing relies on both helpers walking the function's module
    # globals in the same order.
    return zip(fullnames, aliases)
def _pickleLoadScript(mdict, modules):
'''
mdict: Dictionary containing the following keys:
loaddir: Path to the file which is going to be taken as input
functiondef : The full function definition
functionname: The name of the given function, which will be called
output: The name of the outputfile which will be generated
'''
lines = []
for globalname, localname in modules:
lines.append('import {} as {}'.format(globalname, localname))
lines.append('import marshal')
lines.append("data = marshal.load(open('%(loaddir)s','rb'))" % (mdict))
lines.append("%(functiondef)s" % (mdict))
lines.append("ret=[]")
lines.append('for arg in data:')
lines.append(' ret.append(%(functionname)s(*arg))' % (mdict))
lines.append("marshal.dump(ret,open('%(output)s','wb'))" % (mdict))
return os.linesep.join(lines)
def _suppressedPopen(args):
    '''
    Same as a sub.Popen(args) call but suppresses the output
    '''
    # The child is spawned while fd 1 is redirected, so it inherits the
    # redirected descriptor and its stdout stays suppressed after we return.
    with stdout_redirected():
        return sub.Popen(args)
def _run_jobs(qsubcmd, n, func, data):
    '''
    Split *data* into n chunks, write each chunk plus a generated driver
    script to disk, submit one qsub job per chunk, then wait for every job
    and concatenate the marshalled results into one list.
    On success the per-job temp/log files are removed; on failure the
    gridengine logs are left in the calling directory for inspection.
    '''
    datachunks = _splitintochunks(data, n)
    funcret = []
    runningJobs = []
    # Parameters which are related to the function which will be decorated
    rawsource = getsource(func)
    argspec = getargspec(func)
    # Since the source has the decoration header, @runcluster we remove it
    # Remove the lines not starting with @, which indicates a decorator
    filteredlines = re.findall("^(?!@).*", rawsource, re.MULTILINE)
    # source = rawsource[firstline:]
    source = os.linesep.join(filteredlines)
    tmpfiles = [] # Keeps track of all open tempfiles
    try:
        for i, chunk in enumerate(datachunks):
            # Create some tempfiles which will be used as python script and binary
            # dumps respectively, cannot use tempfile since marshal does not allow
            # to use a wrapper as input
            tmpscript = open('{}_run_{}'.format(func.__name__, i + 1), 'w')
            datadump = open('{}_data_{}'.format(func.__name__, i + 1), 'w+b')
            output = open('{}_out_{}'.format(func.__name__, i + 1), 'w+b')
            # output = '{}_out_{}'.format(func.__name__, i + 1)
            # Output needs to be closed separately, since we want to keep the
            # file on the system as long as the qsub command is runnung
            marshal.dump(chunk, datadump)
            mdict = {
                'functiondef': source,
                # The name of the datadump which will be generated using pickle
                'loaddir': datadump.name,
                'functionname': func.func_name,
                'args': argspec.args,
                'output': output.name
            }
            imports = _getModuleImports(func)
            tmpscript.write(_pickleLoadScript(mdict, imports))
            tmpscript.flush()
            # Reset the datadump pointer, otherwise EOFError
            datadump.close()
            cur_qsub = qsubcmd + [tmpscript.name]
            job = _suppressedPopen(cur_qsub)
            tmpfiles.append((tmpscript, datadump, output))
            runningJobs.append(job)
            # execfile(tmpscript.name, dict(), ret)
        for job, tmpfilestuple in zip(runningJobs, tmpfiles):
            # Since we use the -sync flag, we need to wait for the calling command
            # to finish
            retcode = job.wait()
            # If we have any retcode, we keep the log outputs of the gridengine
            # alive
            tmpscript, dump, output = tmpfilestuple
            tmpscript.close()
            if retcode:
                raise ValueError(
                    "An error Occured while running the gridengine, please refer to the logs produced in the calling directory")
            else: # Otherwise delete the logs of gridengine
                for ftoremove in glob('%s*' % (tmpscript.name)):
                    absremovepath = os.path.join(os.getcwd(), ftoremove)
                    os.remove(absremovepath)
            # Collect this job's marshalled results and clean up its files.
            output.seek(0)
            funcret.extend(marshal.load(output))
            output.close()
            dump.close()
            os.remove(output.name)
            os.remove(dump.name)
    except:
        # NOTE(review): bare except silently swallows all errors (including
        # the ValueError raised above) and re-deletes files that may already
        # have been removed — consider narrowing and re-raising.
        for f in tmpfiles:
            tmpscript, dump, output = f
            output.close()
            tmpscript.close()
            dump.close()
            os.remove(output.name)
            os.remove(tmpscript.name)
            os.remove(dump.name)
    return funcret
def _splitintochunks(l, num):
    '''
    Split list *l* into roughly equal chunks, returned as an iterator.
    The chunk size is the smallest that covers len(l) in at most *num*
    slices, so the iterator does not necessarily yield exactly num chunks.
    '''
    chunksize, leftover = divmod(len(l), num)
    if leftover:
        chunksize += 1
    return (l[start:start + chunksize] for start in xrange(0, len(l), chunksize))
| mit |
alxgu/ansible | test/units/modules/network/f5/test_bigip_gtm_facts.py | 16 | 5948 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
import pytest
pytestmark = []
if sys.version_info < (2, 7):
pytestmark.append(pytest.mark.skip("F5 Ansible modules require Python >= 2.7"))
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
try:
from library.modules.bigip_gtm_facts import Parameters
from library.modules.bigip_gtm_facts import ServerParameters
from library.modules.bigip_gtm_facts import PoolParameters
from library.modules.bigip_gtm_facts import WideIpParameters
from library.modules.bigip_gtm_facts import ModuleManager
from library.modules.bigip_gtm_facts import ServerFactManager
from library.modules.bigip_gtm_facts import PoolFactManager
from library.modules.bigip_gtm_facts import TypedPoolFactManager
from library.modules.bigip_gtm_facts import UntypedPoolFactManager
from library.modules.bigip_gtm_facts import WideIpFactManager
from library.modules.bigip_gtm_facts import TypedWideIpFactManager
from library.modules.bigip_gtm_facts import UntypedWideIpFactManager
from library.modules.bigip_gtm_facts import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.bigip.tm.gtm.pool import A
from f5.utils.responses.handlers import Stats
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_pool import Parameters
from ansible.modules.network.f5.bigip_gtm_pool import ServerParameters
from ansible.modules.network.f5.bigip_gtm_pool import PoolParameters
from ansible.modules.network.f5.bigip_gtm_pool import WideIpParameters
from ansible.modules.network.f5.bigip_gtm_pool import ModuleManager
from ansible.modules.network.f5.bigip_gtm_pool import ServerFactManager
from ansible.modules.network.f5.bigip_gtm_pool import PoolFactManager
from ansible.modules.network.f5.bigip_gtm_pool import TypedPoolFactManager
from ansible.modules.network.f5.bigip_gtm_pool import UntypedPoolFactManager
from ansible.modules.network.f5.bigip_gtm_pool import WideIpFactManager
from ansible.modules.network.f5.bigip_gtm_pool import TypedWideIpFactManager
from ansible.modules.network.f5.bigip_gtm_pool import UntypedWideIpFactManager
from ansible.modules.network.f5.bigip_gtm_pool import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.bigip.tm.gtm.pool import A
from f5.utils.responses.handlers import Stats
from units.modules.utils import set_module_args
except ImportError:
pytestmark.append(pytest.mark.skip("F5 Ansible modules require the f5-sdk Python library"))
# pytestmark will cause this test to skip but we have to define A so that classes can be
# defined below
A = object
# Directory containing the JSON fixtures, plus a module-level memoization
# cache shared by load_fixture().
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load (and memoize) a fixture file; JSON content is parsed, anything
    else is returned as raw text."""
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as f:
            content = f.read()
        try:
            content = json.loads(content)
        except Exception:
            # Deliberately broad: non-JSON fixtures stay as plain strings.
            pass
        fixture_data[path] = content
    return fixture_data[path]
class FakeStatResource(object):
    """Minimal stand-in for an F5 stats resource: just exposes ``entries``."""
    def __init__(self, obj):
        self.entries = obj
class FakeARecord(A):
    """Fake GTM A-record resource: copies the ``attrs`` mapping onto the
    instance as plain attributes."""

    def __init__(self, *args, **kwargs):
        attrs = kwargs.pop('attrs', {})
        for attr_name in attrs:
            setattr(self, attr_name, attrs[attr_name])
class TestParameters(unittest.TestCase):
    """Checks that the Parameters adapter exposes module args verbatim."""

    def test_module_parameters(self):
        raw = dict(
            include=['pool'],
            filter='name.*'
        )
        parsed = Parameters(params=raw)
        assert parsed.include == ['pool']
        assert parsed.filter == 'name.*'
class TestManager(unittest.TestCase):
    def setUp(self):
        self.spec = ArgumentSpec()

    def test_get_typed_pool_facts(self, *args):
        # End-to-end fact collection through ModuleManager with the device
        # I/O mocked out: the typed pool manager is fed a fixture
        # collection and fixture stats instead of touching a real BIG-IP.
        set_module_args(dict(
            include='pool',
            password='password',
            server='localhost',
            user='admin'
        ))

        fixture1 = load_fixture('load_gtm_pool_a_collection.json')
        fixture2 = load_fixture('load_gtm_pool_a_example_stats.json')
        collection = [FakeARecord(attrs=x) for x in fixture1['items']]
        stats = Stats(FakeStatResource(fixture2['entries']))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        tfm = TypedPoolFactManager(module=module)
        tfm.read_collection_from_device = Mock(return_value=collection)
        tfm.read_stats_from_device = Mock(return_value=stats.stat)

        tm = PoolFactManager(module=module)
        tm.version_is_less_than_12 = Mock(return_value=False)
        tm.get_manager = Mock(return_value=tfm)

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.get_manager = Mock(return_value=tm)
        mm.gtm_provisioned = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert 'pool' in results
        assert len(results['pool']) > 0
        assert 'load_balancing_mode' in results['pool'][0]
| gpl-3.0 |
lrr-tum/poncos | vendor/fast-lib/vendor/mosquitto-1.3.5/test/broker/07-will-null.py | 18 | 1460 | #!/usr/bin/env python
# Test whether a client will is transmitted correctly with a null character in the middle.
import struct
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
# rc stays 1 (failure) unless the expected will message arrives intact.
rc = 1
mid = 53
keepalive = 60
connect_packet = mosq_test.gen_connect("will-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)

subscribe_packet = mosq_test.gen_subscribe(mid, "will/null/test", 0)
suback_packet = mosq_test.gen_suback(mid, 0)

publish_packet = mosq_test.gen_publish("will/null/test", qos=0)

# Start a local broker under test on port 1888.
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)

try:
    time.sleep(0.5)

    sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=30)
    sock.send(subscribe_packet)

    if mosq_test.expect_packet(sock, "suback", suback_packet):
        # The helper connects with a will containing a NUL byte and then
        # dies; the broker should deliver that will to our subscription.
        will = subprocess.Popen(['./07-will-null-helper.py'])
        will.wait()

        if mosq_test.expect_packet(sock, "publish", publish_packet):
            rc = 0

    sock.close()
finally:
    broker.terminate()
    broker.wait()
    # On failure, dump the broker's stderr for diagnosis.
    if rc:
        (stdo, stde) = broker.communicate()
        print(stde)

exit(rc)
| gpl-2.0 |
abhattad4/Digi-Menu | django/conf/locale/de/formats.py | 115 | 1100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# German (de) display formats, using Django date-format syntax.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday

# Accepted input formats, using Python strftime syntax.
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
)
# German number formatting: comma decimal mark, dot thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
rchlchang/byte1 | lib/werkzeug/script.py | 318 | 11249 | # -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you writing such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
from __future__ import print_function
import sys
import inspect
import getopt
from os.path import basename
from werkzeug._compat import iteritems
# Map the *type* of an action argument's default value to its type name.
argument_types = {
    bool: 'boolean',
    str: 'string',
    int: 'integer',
    float: 'float'
}

# Parse a raw command-line string into a value of the named type.
converters = {
    'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
    'string': str,
    'integer': int,
    'float': float
}
def run(namespace=None, action_prefix='action_', args=None):
    """Run the script. Participating actions are looked up in the caller's
    namespace if no namespace is given, otherwise in the dict provided.
    Only items that start with action_prefix are processed as actions. If
    you want to use all items in the namespace provided as actions set
    action_prefix to an empty string.

    :param namespace: An optional dict where the functions are looked up in.
                      By default the local namespace of the caller is used.
    :param action_prefix: The prefix for the functions. Everything else
                          is ignored.
    :param args: the arguments for the function. If not specified
                 :data:`sys.argv` without the first argument is used.
    """
    if namespace is None:
        # Default to the caller's locals so actions can simply be defined
        # as module-level functions next to the run() call.
        namespace = sys._getframe(1).f_locals
    actions = find_actions(namespace, action_prefix)
    if args is None:
        args = sys.argv[1:]
    if not args or args[0] in ('-h', '--help'):
        return print_usage(actions)
    elif args[0] not in actions:
        fail('Unknown action \'%s\'' % args[0])

    arguments = {}      # final keyword arguments handed to the action
    types = {}          # real argument name -> option type name
    key_to_arg = {}     # '-x' / '--arg' / positional index -> real name
    long_options = []
    formatstring = ''
    func, doc, arg_def = actions[args.pop(0)]
    for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
        real_arg = arg.replace('-', '_')
        if shortcut:
            formatstring += shortcut
            if not isinstance(default, bool):
                # non-boolean shortcuts take a value (getopt ':' marker)
                formatstring += ':'
            key_to_arg['-' + shortcut] = real_arg
        long_options.append(isinstance(default, bool) and arg or arg + '=')
        key_to_arg['--' + arg] = real_arg
        key_to_arg[idx] = real_arg
        types[real_arg] = option_type
        arguments[real_arg] = default

    try:
        optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
    except getopt.GetoptError as e:
        fail(str(e))

    specified_arguments = set()
    for key, value in enumerate(posargs):
        try:
            arg = key_to_arg[key]
        except KeyError:
            # BUG FIX: key_to_arg is a dict, so an unknown positional index
            # raises KeyError, not IndexError.  The previous
            # ``except IndexError`` could never trigger, so the KeyError
            # escaped instead of reporting "Too many parameters".
            fail('Too many parameters')
        specified_arguments.add(arg)
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for argument %s (%s): %s' % (key, arg, value))

    for key, value in optlist:
        arg = key_to_arg[key]
        if arg in specified_arguments:
            fail('Argument \'%s\' is specified twice' % arg)
        if types[arg] == 'boolean':
            # Presence of a boolean flag toggles it; a ``no_``-prefixed
            # (negated) flag switches the underlying option off.
            if arg.startswith('no_'):
                value = 'no'
            else:
                value = 'yes'
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for \'%s\': %s' % (key, value))

    # Strip the ``no_`` prefix again so the action receives the original
    # parameter names.
    newargs = {}
    for k, v in iteritems(arguments):
        newargs[k.startswith('no_') and k[3:] or k] = v
    arguments = newargs
    return func(**arguments)
def fail(message, code=-1):
    """Write an error *message* to stderr and exit with status *code*."""
    sys.stderr.write('Error: %s\n' % message)
    sys.exit(code)
def find_actions(namespace, action_prefix):
    """Collect every action in *namespace*: entries whose key starts with
    *action_prefix*, keyed by the name with the prefix stripped."""
    prefix_len = len(action_prefix)
    return dict(
        (key[prefix_len:], analyse_action(value))
        for key, value in iteritems(namespace)
        if key.startswith(action_prefix)
    )
def print_usage(actions):
    """Print the usage information. (Help screen)"""
    # BUG FIX: dict.items() returns a view on Python 3, which has no
    # .sort() method -- sort via sorted() so this works on 2.x and 3.x.
    actions = sorted(actions.items())
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print(' %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print(' %s:' % name)
        for line in doc.splitlines():
            print(' %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                # boolean flags take no value, so no type/default columns
                print(' %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print(' %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print()
def analyse_action(func):
    """Analyse a function and return ``(func, description, arguments)``.

    Every parameter must have a default; the default's type selects the
    option type, and a ``(shortcut, default)`` tuple default additionally
    declares a short option letter.

    :raises TypeError: for ``*args``/``**kwargs``, missing defaults, or
                       parameter names starting with an underscore.
    """
    description = inspect.getdoc(func) or 'undocumented action'
    arguments = []
    # BUG FIX: inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() and fall back only on interpreters without it.
    try:
        spec = inspect.getfullargspec(func)
        args, varargs, kwargs, defaults = \
            spec.args, spec.varargs, spec.varkw, spec.defaults
    except AttributeError:
        args, varargs, kwargs, defaults = inspect.getargspec(func)
    if varargs or kwargs:
        raise TypeError('variable length arguments for action not allowed.')
    if len(args) != len(defaults or ()):
        raise TypeError('not all arguments have proper definitions')
    for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
        if arg.startswith('_'):
            raise TypeError('arguments may not start with an underscore')
        if not isinstance(definition, tuple):
            shortcut = None
            default = definition
        else:
            shortcut, default = definition
        argument_type = argument_types[type(default)]
        if isinstance(default, bool) and default is True:
            # booleans defaulting to True become ``--no-<arg>`` switches
            arg = 'no-' + arg
        arguments.append((arg.replace('_', '-'), shortcut,
                          default, argument_type))
    return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
    """Returns an action callback that spawns a new interactive
    python shell.

    :param init_func: an optional initialization function that is
                      called before the shell is started. The return
                      value of this function is the initial namespace.
    :param banner: the banner that is displayed before the shell. If
                   not specified a generic banner is used instead.
    :param use_ipython: if set to `True` ipython is used if available.
    """
    if banner is None:
        banner = 'Interactive Werkzeug Shell'
    if init_func is None:
        init_func = dict

    def try_ipython(namespace):
        # Returns True when an IPython shell was started successfully.
        # Tries the modern embed API first, then the legacy one.
        try:
            try:
                from IPython.frontend.terminal.embed import \
                    InteractiveShellEmbed
                shell = InteractiveShellEmbed(banner1=banner)
            except ImportError:
                from IPython.Shell import IPShellEmbed
                shell = IPShellEmbed(banner=banner)
        except ImportError:
            return False
        shell(global_ns={}, local_ns=namespace)
        return True

    def action(ipython=use_ipython):
        """Start a new interactive python session."""
        namespace = init_func()
        if ipython and try_ipython(namespace):
            return
        # Fall back to the plain stdlib REPL.
        from code import interact
        interact(banner, local=namespace)
    return action
def make_runserver(app_factory, hostname='localhost', port=5000,
                   use_reloader=False, use_debugger=False, use_evalex=True,
                   threaded=False, processes=1, static_files=None,
                   extra_files=None, ssl_context=None):
    """Returns an action callback that spawns a new development server.

    .. versionadded:: 0.5
       `static_files` and `extra_files` was added.

    ..versionadded:: 0.6.1
       `ssl_context` was added.

    :param app_factory: a function that returns a new WSGI application.
    :param hostname: the default hostname the server should listen on.
    :param port: the default port of the server.
    :param use_reloader: the default setting for the reloader.
    :param use_evalex: the default setting for the evalex flag of the debugger.
    :param threaded: the default threading setting.
    :param processes: the default number of processes to start.
    :param static_files: optional dict of static files.
    :param extra_files: optional list of extra files to track for reloading.
    :param ssl_context: optional SSL context for running server in HTTPS mode.
    """
    # The action's tuple defaults declare the -h / -p shortcuts.
    def action(hostname=('h', hostname), port=('p', port),
               reloader=use_reloader, debugger=use_debugger,
               evalex=use_evalex, threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        run_simple(hostname, port, app_factory(), reloader, debugger,
                   evalex, extra_files, 1, threaded, processes,
                   static_files=static_files, ssl_context=ssl_context)
    return action
| apache-2.0 |
pgmillon/ansible | test/units/pytest/plugins/ansible_pytest_coverage.py | 1 | 1260 | """Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`."""
from __future__ import (absolute_import, division, print_function)
def pytest_configure():
    """Patch ``os._exit`` so coverage data is saved in forked children
    (e.g. ``pytest --boxed``) before the process terminates."""
    try:
        import coverage
    except ImportError:
        return
    if not hasattr(coverage, 'Coverage'):
        # a different/ancient "coverage" module without the Coverage API
        return

    import gc
    import os

    # Reuse any Coverage instances that are already active in-process.
    coverage_instances = [obj for obj in gc.get_objects()
                          if isinstance(obj, coverage.Coverage)]

    cov = None
    if not coverage_instances:
        # None running yet: only start our own when the environment asks
        # for it (both config and output location must be present).
        coverage_config = os.environ.get('COVERAGE_CONF')
        coverage_output = os.environ.get('COVERAGE_FILE')
        if not coverage_config or not coverage_output:
            return
        cov = coverage.Coverage(config_file=coverage_config)
        coverage_instances.append(cov)

    os_exit = os._exit

    def coverage_exit(*args, **kwargs):
        # Flush every tracked instance before the real _exit() kills us.
        for instance in coverage_instances:
            instance.stop()
            instance.save()
        os_exit(*args, **kwargs)

    os._exit = coverage_exit

    if cov:
        cov.start()
| gpl-3.0 |
Schpin/schpin-chassis | schpin_tote/src/lib/stl.py | 1 | 1937 | import subprocess, os
from lib.util import convert_scad
def render(filename, scad_cfg, mirror):
    """ renders scad module defined by scad_cfg into stl 'filename' """
    # Only binary STL targets are supported (names ending in 'b').
    assert filename[-1] == 'b'
    scad = "../scad/tmp.scad"
    # Compose the temporary OpenSCAD program in memory, then write once.
    source = "include <model.scad>;\n $fn=32;"
    if mirror is not None:
        source += "mirror({})".format([int(x) for x in mirror])
    arg_list = ",".join(str(convert_scad(a)) for a in scad_cfg['args'])
    source += scad_cfg['module'] + "(" + arg_list + ");"
    with open(scad, "w") as fd:
        fd.write(source)
    tmp_stl = "../stl/tmp.stl"
    print("Rendering: ", filename)
    # openscad is noisy; silence its stdout/stderr entirely.
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(['openscad', '-o', tmp_stl, scad],
                              stdout=devnull, stderr=devnull)
    subprocess.check_call(['ivcon', tmp_stl, filename])
    subprocess.check_call(["admesh", "--write-binary-stl=" + filename,
                           "--scale=0.001", filename])
    os.remove(tmp_stl)
    os.remove(scad)
def render_chain(segments, name, folder, mirror):
    """Render every segment in the chain starting at *name* into
    ``../stl/<folder>``, following ``next_segment`` links iteratively."""
    target_dir = "../stl/" + folder
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    current = name
    while current is not None:
        segment = segments[current]
        if 'scad' in segment:
            render(target_dir + current + ".stlb", segment['scad'], mirror)
        current = segment.get('next_segment')
def render_stl(cfg):
    """Render the robot body, every leg chain and the gimbal chain."""
    render("../stl/base_link.stlb", {"module": "body", "args": []}, mirror=None)
    if not os.path.exists("../stl/leg/"):
        os.mkdir("../stl/leg")
    for leg, leg_cfg in cfg['legs'].items():
        render_chain(
            leg_cfg['segments'],
            leg_cfg['chain_root'],
            folder="leg/" + leg + "/",
            # legs without a 'mirror' entry are rendered unmirrored
            mirror=leg_cfg.get('mirror'),
        )
    render_chain(
        cfg['gimbal']['segments'], cfg['gimbal']['chain_root'],
        folder="gimbal/", mirror=None
    )
| gpl-3.0 |
dantebarba/docker-media-server | plex/Sub-Zero.bundle/Contents/Libraries/Shared/js2py/internals/prototypes/jsregexp.py | 9 | 1627 | from __future__ import unicode_literals
from ..conversions import *
from ..func_utils import *
class RegExpPrototype:
    """Implementations of the JS ``RegExp.prototype`` methods.

    Note the methods take ``this`` (the JS regexp object) rather than
    ``self`` -- they are installed on the prototype elsewhere (see the
    comment on ``_exec`` below).
    """
    def toString(this, args):
        # Reassemble the literal form ``/pattern/flags``.
        flags = u''
        try:
            if this.glob:
                flags += u'g'
            if this.ignore_case:
                flags += u'i'
            if this.multiline:
                flags += u'm'
        except:
            # `this` may lack the flag attributes; render with no flags.
            pass
        try:
            # an empty pattern is rendered as the non-capturing group (?:)
            v = this.value if this.value else u'(?:)'
        except:
            v = u'(?:)'
        return u'/%s/' % v + flags

    def test(this, args):
        # test(string): True when exec() finds a match (result is not null).
        string = get_arg(args, 0)
        return RegExpExec(this, string, args.space) is not null

    def _exec(
            this, args
    ):  # will be changed to exec in base.py. cant name it exec here...
        # exec(string): returns the JS match array, or JS null on no match.
        string = get_arg(args, 0)
        return RegExpExec(this, string, args.space)
def RegExpExec(this, string, space):
    """Shared worker behind ``exec``/``test``: match *this* regexp against
    *string* and return a JS match array, or JS ``null`` on no match."""
    if GetClass(this) != 'RegExp':
        raise MakeError('TypeError', 'RegExp.prototype.exec is not generic!')
    string = to_string(string)
    length = len(string)
    # A global regexp resumes from lastIndex; otherwise search from 0.
    i = to_int(this.get('lastIndex')) if this.glob else 0
    matched = False
    while not matched:
        if i < 0 or i > length:
            # ran past the end: reset lastIndex and report no match
            this.put('lastIndex', 0.)
            return null
        matched = this.match(string, i)
        i += 1
    start, end = matched.span()  #[0]+i-1, matched.span()[1]+i-1
    if this.glob:
        # global regexps remember where the next search should begin
        this.put('lastIndex', float(end))
    # match array: the full match followed by every capture group
    arr = convert_to_js_type(
        [matched.group()] + list(matched.groups()), space=space)
    arr.put('index', float(start))
    arr.put('input', unicode(string))
    return arr
| gpl-3.0 |
bmenendez/20up | tntwrapper.py | 1 | 7920 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Borja Menendez Moreno
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Borja Menéndez Moreno <info20up@gmail.com>
This is the API Wrapper for the 20up backup program. This wrapper allows
a client to retrieve information about his specific account.
"""
import os, urllib, string
from time import sleep
from tntapi import *
# Width the per-album picture counter is zero-padded to in file names.
CONSTANT_FILL = 6
# Consecutive empty responses tolerated before giving up on more comments.
MAX_TRIES = 10
# Directory the script was started from; all output is created below it.
ROOTPATH = os.getcwdu()
# Output directory / file names (Spanish UI) and file extensions.
PHOTOS = 'fotos'
COMMENTS = 'comentarios'
JPG = '.jpg'
TXT = '.txt'
def getFullName(picture, counter):
    """Build the normalized base file name for *picture*: the zero-padded
    counter joined with two of the picture's metadata fields."""
    padded = string.zfill(counter, CONSTANT_FILL)
    return normalize('_'.join([padded, picture[2], picture[1]]))
class Wrapper():
    """
    The wrapper for the tntapi.
    This class eases the connection.
    When constructed, it raises a RuntimeError if it is impossible to log in the
    social network.
    """
    def __init__(self, browser, console=False):
        # API drives the social network through the given browser session.
        self.tnt = API(browser)
        # Becomes True only after getAllAlbums() succeeds (see
        # downloadAllPictures below).
        self.isLogged = False
        # When True, progress messages (in Spanish) are printed to stdout.
        self.console = console

    def waitForLogin(self):
        # Delegate: block until the user has logged in via the browser.
        self.tnt.waitForLogin()

    def downloadPicturesFromAlbum(self, album, totalPictures, alreadyDownloaded, oldFirstPicture, comments=False):
        """
        Download pictures from a given album into the given directory.
        Args:
            album: the album.
            totalPictures: the total number of pictures of the user.
            alreadyDownloaded: the number of pictures already downloaded.
            oldFirstPicture: the first picture of the previous album.
            comments: indicates whether to obtain comments of the picture or not.
        Raises:
            RuntimeError if the user is not already logged in.
        """
        if not self.isLogged:
            raise RuntimeError('Es necesario estar logueado en la red social')
        if self.console:
            print '|'
            print '| Album', album[0]
            print '|'
            print '| Obteniendo informacion del album'
        # All albums live under <cwd>/fotos; each album gets its own
        # subdirectory named after album[0].
        joinPath = os.path.join(ROOTPATH, PHOTOS)
        if not os.path.exists(joinPath):
            if self.console:
                print '| Creando directorio donde se alojaran todas las fotos...'
            os.makedirs(joinPath)
            if self.console:
                print '| Directorio creado'
        albumPath = os.path.join(joinPath, album[0])
        if not os.path.exists(albumPath):
            if self.console:
                print '| Creando directorio donde se alojaran las fotos del album...'
            os.makedirs(albumPath)
            if self.console:
                print '| Directorio creado'
        os.chdir(albumPath)
        if self.console:
            print '| Comenzando la descarga de las fotos del album...'
        counter = 1
        # NOTE(review): album[1] appears to be the album's picture count
        # and album[2] its locator passed to the API -- confirm in tntapi.
        newFirstPicture = self.tnt.getFirstPicture(album[2], oldFirstPicture)
        firstPicture = ''
        lastPicture = ['']
        oldSrc = ''
        while counter <= album[1] and newFirstPicture != oldFirstPicture:
            pic = self.tnt.getPicture(oldSrc, comments)
            oldSrc = pic
            if counter == 1:
                firstPicture = pic
            elif pic[0] == firstPicture[0]:
                # wrapped around to the first picture: album is finished
                break
            if lastPicture[0] != pic[0]:
                # only save/count pictures we have not just seen
                self.savePicture(pic, counter, album[1], totalPictures, alreadyDownloaded + counter)
                if comments:
                    self.saveComments(pic, counter)
                counter += 1
                lastPicture = pic
            self.tnt.getNextPicture()
        return newFirstPicture

    def savePicture(self, picture, myCounter, totalAlbum, totalPics, alreadyDown):
        """
        Save a picture.
        Args:
            picture: a picture to be saved.
            myCounter: the counter for the picture.
            totalAlbum: the number of pictures of the album.
            totalPics: the number of pictures of the user.
            alreadyDown: the number of pictures already downloaded.
        """
        # Throttle requests a little so the site is not hammered.
        sleep(0.25)
        picName = getFullName(picture, myCounter) + JPG
        if not os.path.exists(picName):
            if self.console:
                totalPerc = str(100 * alreadyDown / totalPics)
                albumPerc = str(100 * myCounter / totalAlbum)
                print '|'
                print '| [' + totalPerc + '% total] [' + albumPerc + '% album]'
                print '| Descargando foto ' + picName + '...'
            # picture[0] is the image URL the picture is fetched from
            urllib.urlretrieve(picture[0], picName)

    def saveComments(self, picture, myCounter):
        """
        Save a picture's comments.
        Args:
            picture: to obtain the comments.
            myCounter: to know the name of the file with comments.
        """
        commentsFileName = getFullName(picture, myCounter) + TXT
        # picture[3] holds the list of comments; skip when empty or when
        # the file already exists from a previous run.
        if not os.path.exists(commentsFileName) and picture[3] != []:
            if self.console:
                print '| Descargando sus comentarios...'
            file2write = open(commentsFileName, 'w')
            for comment in picture[3]:
                file2write.write('******************\r\n')
                file2write.write(comment.encode('utf-8') + '\r\n')
            file2write.close()

    def downloadAllPictures(self, comments=False):
        """
        Download all the pictures for all the albums.
        Args:
            comments: indicates whether to obtain comments of the picture or not.
        Returns:
            0 on success, -1 when the album listing could not be fetched
            (i.e. the user is not logged in).
        """
        allAlbums = self.tnt.getAllAlbums()
        # a successful album listing doubles as the login check
        self.isLogged = (allAlbums != None)
        if not self.isLogged:
            return -1
        totalPictures = 0
        for album in allAlbums:
            totalPictures += album[1]
        alreadyDownloaded = 0
        oldFirstPicture = ''
        for album in allAlbums:
            oldFirstPicture = self.downloadPicturesFromAlbum(album, totalPictures, alreadyDownloaded, oldFirstPicture, comments)
            alreadyDownloaded += album[1]
        return 0

    def goToPrivates(self):
        """
        Call the API to go to the private messages' page.
        """
        self.tnt.goToPrivates()

    def downloadAllComments(self):
        """
        Download all the comments in the wall.
        """
        os.chdir(ROOTPATH)
        file2write = open(COMMENTS + TXT, 'w')
        tries = 0
        discard = 0
        while True:
            # `discard` tells the API how many comments were already read
            comments = self.tnt.loadMoreComments(discard)
            if not comments:
                # empty response: retry up to MAX_TRIES times before quitting
                if tries < MAX_TRIES:
                    tries += 1
                    sleep(0.3)
                else:
                    break
            else:
                tries = 1
                discard += len(comments)
                if self.console:
                    print '| Descargados ', discard, 'comentarios'
                self.saveWall(comments, file2write)
        file2write.close()

    def saveWall(self, comments, file2write):
        """
        Write the comments in the file.
        Args:
            comments: the list of comments to be saved.
            file2write: the file to write in.
        """
        for comment in comments:
            file2write.write(comment.encode('utf-8') + '\r\n\r\n')
| gpl-3.0 |
AIML/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.decomposition import PCA
# NOTE(review): ``sklearn.lda`` is the import path of old scikit-learn
# releases; modern releases expose this as
# ``sklearn.discriminant_analysis.LinearDiscriminantAnalysis``.
from sklearn.lda import LDA

# Iris: 4 features per sample, 3 classes (see module docstring above).
iris = datasets.load_iris()

X = iris.data
y = iris.target
target_names = iris.target_names

# PCA is unsupervised: it is fit on X only.
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)

# LDA is supervised: it additionally uses the class labels y.
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)

# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))

# Scatter the samples in the 2D PCA space, one colour per class.
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')

# Same samples projected into the 2D LDA space.
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')

plt.show()
| bsd-3-clause |
floooh/fips | yaml/yaml2/reader.py | 424 | 6746 | # This module contains abstractions for the input stream. You don't have to
# looks further, there are no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position to `length` characters.
# reader.index - the number of the current character.
# reader.line, stream.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
    # Raised for undecodable bytes or characters outside YAML's printable
    # range; carries enough context for a precise error message.
    def __init__(self, name, position, character, encoding, reason):
        self.name = name            # stream name (file name or "<string>")
        self.character = character  # offending byte (str) or code point (int)
        self.position = position
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        if isinstance(self.character, str):
            # a raw byte that could not be decoded
            return "'%s' codec can't decode byte #x%02x: %s\n" \
                   " in \"%s\", position %d" \
                   % (self.encoding, ord(self.character), self.reason,
                      self.name, self.position)
        else:
            # a decoded but non-printable character
            return "unacceptable character #x%04x: %s\n" \
                   " in \"%s\", position %d" \
                   % (self.character, self.reason,
                      self.name, self.position)
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to unicode,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.
    # Reader accepts
    #  - a `str` object,
    #  - a `unicode` object,
    #  - a file-like object with its `read` method returning `str`,
    #  - a file-like object with its `read` method returning `unicode`.
    # Yeah, it's ugly and slow.
    def __init__(self, stream):
        self.name = None
        self.stream = None
        self.stream_pointer = 0
        self.eof = True
        self.buffer = u''        # decoded characters; gets u'\0' appended at EOF
        self.pointer = 0         # index of the current character in buffer
        self.raw_buffer = None   # not-yet-decoded bytes read from the stream
        self.raw_decode = None   # codec decode function chosen from the BOM
        self.encoding = None
        self.index = 0           # absolute character index in the input
        self.line = 0
        self.column = 0
        if isinstance(stream, unicode):
            self.name = "<unicode string>"
            self.check_printable(stream)
            # trailing NUL acts as the end-of-stream sentinel for callers
            self.buffer = stream+u'\0'
        elif isinstance(stream, str):
            self.name = "<string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # assume a file-like object exposing read()
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = ''
            self.determine_encoding()

    def peek(self, index=0):
        # Return the character `index` positions ahead without consuming it.
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def prefix(self, length=1):
        # Return the next `length` characters without consuming them.
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        # Consume `length` characters, keeping index/line/column in sync.
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # any YAML line break; a CRLF pair counts once (on the '\r'
            # only when it is not followed by '\n')
            if ch in u'\n\x85\u2028\u2029' \
                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
                self.line += 1
                self.column = 0
            elif ch != u'\uFEFF':
                # a BOM does not advance the column
                self.column += 1
            length -= 1

    def get_mark(self):
        # Snapshot of the current position, used for error messages.
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                        self.buffer, self.pointer)
        else:
            # no buffer snippet available once data comes from a stream
            return Mark(self.name, self.index, self.line, self.column,
                        None, None)

    def determine_encoding(self):
        # Detect UTF-16 via its BOM; otherwise default to UTF-8.
        while not self.eof and len(self.raw_buffer) < 2:
            self.update_raw()
        if not isinstance(self.raw_buffer, unicode):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Matches any character YAML forbids in a stream.
    NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
    def check_printable(self, data):
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                              'unicode', "special characters are not allowed")

    def update(self, length):
        # Ensure at least `length` decoded characters are available past
        # the current pointer; compacts the buffer first.
        if self.raw_buffer is None:
            return
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # incremental decode: `converted` is how many raw
                    # bytes were consumed (a trailing partial multi-byte
                    # sequence is kept for the next round)
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError, exc:
                    character = exc.object[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                # the input was already unicode; no decoding needed
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # append the NUL sentinel exactly once, at end of stream
                self.buffer += u'\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=1024):
        # Pull another chunk of raw bytes from the underlying stream.
        data = self.stream.read(size)
        if data:
            self.raw_buffer += data
            self.stream_pointer += len(data)
        else:
            self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
| mit |
icodedev7/customap | devkit/mingw/bin/lib/textwrap.py | 110 | 16848 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z' # end of chunk
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
# recompile the regexes for Unicode mode -- done in this clumsy way for
# backwards compatibility because it's rather common to monkey-patch
# the TextWrapper class' wordsep_re attribute.
self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
self.wordsep_simple_re_uni = re.compile(
self.wordsep_simple_re.pattern, re.U)
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if isinstance(text, unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
else:
if self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = filter(None, chunks) # remove empty chunks
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
    """_wrap_chunks(chunks : [string]) -> [string]

    Wrap a sequence of text chunks and return a list of lines of
    length 'self.width' or less.  (If 'break_long_words' is false,
    some lines may be longer than this.)  Chunks correspond roughly
    to words and the whitespace between them: each chunk is
    indivisible (modulo 'break_long_words'), but a line break can
    come between any two chunks.  Chunks should not have internal
    whitespace; ie. a chunk is either all whitespace or a "word".
    Whitespace chunks will be removed from the beginning and end of
    lines, but apart from that whitespace is preserved.

    Note: 'chunks' is consumed destructively (reversed and popped).
    Raises ValueError if self.width is not positive.
    """
    lines = []
    if self.width <= 0:
        raise ValueError("invalid width %r (must be > 0)" % self.width)

    # Arrange in reverse order so items can be efficiently popped
    # from a stack of chucks.
    chunks.reverse()

    while chunks:

        # Start the list of chunks that will make up the current line.
        # cur_len is just the length of all the chunks in cur_line.
        cur_line = []
        cur_len = 0

        # Figure out which static string will prefix this line.
        if lines:
            indent = self.subsequent_indent
        else:
            indent = self.initial_indent

        # Maximum width for this line.
        width = self.width - len(indent)

        # First chunk on line is whitespace -- drop it, unless this
        # is the very beginning of the text (ie. no lines started yet).
        if self.drop_whitespace and chunks[-1].strip() == '' and lines:
            del chunks[-1]

        # Greedily pull chunks off the stack while they still fit.
        while chunks:
            l = len(chunks[-1])

            # Can at least squeeze this chunk onto the current line.
            if cur_len + l <= width:
                cur_line.append(chunks.pop())
                cur_len += l

            # Nope, this line is full.
            else:
                break

        # The current line is full, and the next chunk is too big to
        # fit on *any* line (not just this one).
        if chunks and len(chunks[-1]) > width:
            self._handle_long_word(chunks, cur_line, cur_len, width)

        # If the last chunk on this line is all whitespace, drop it.
        if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
            del cur_line[-1]

        # Convert current line back to a string and store it in list
        # of all lines (return value).
        if cur_line:
            lines.append(indent + ''.join(cur_line))

    return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
    """wrap(text : string) -> [string]

    Reformat the single paragraph in 'text' so it fits in lines of
    no more than 'self.width' columns, and return a list of wrapped
    lines.  Tabs in 'text' are expanded with string.expandtabs(),
    and all other whitespace characters (including newline) are
    converted to space.
    """
    # Pipeline: normalize whitespace, split into indivisible chunks,
    # optionally widen sentence-ending spaces, then do the actual wrap.
    normalized = self._munge_whitespace(text)
    pieces = self._split(normalized)
    if self.fix_sentence_endings:
        self._fix_sentence_endings(pieces)
    return self._wrap_chunks(pieces)
def fill(self, text):
    """fill(text : string) -> string

    Reformat the single paragraph in 'text' to fit in lines of no
    more than 'self.width' columns, and return a new string
    containing the entire wrapped paragraph.
    """
    # fill() is just wrap() with the result glued back together.
    wrapped_lines = self.wrap(text)
    return "\n".join(wrapped_lines)
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Reformat the single paragraph in 'text' so it fits in lines of no
    more than 'width' columns, and return a list of wrapped lines.  By
    default, tabs in 'text' are expanded with string.expandtabs(), and
    all other whitespace characters (including newline) are converted to
    space.  See TextWrapper class for available keyword args to customize
    wrapping behaviour.
    """
    # Convenience shim: build a throwaway wrapper and delegate.
    return TextWrapper(width=width, **kwargs).wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Reformat the single paragraph in 'text' to fit in lines of no more
    than 'width' columns, and return a new string containing the entire
    wrapped paragraph.  As with wrap(), tabs are expanded and other
    whitespace characters converted to space.  See TextWrapper class for
    available keyword args to customize wrapping behaviour.
    """
    # Convenience shim: build a throwaway wrapper and delegate.
    return TextWrapper(width=width, **kwargs).fill(text)
# -- Loosely related functionality -------------------------------------

# Lines made up solely of blanks/tabs; dedent() blanks these out so they
# do not influence the common-margin computation.
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
# Captures the leading blank/tab run of every line that has visible content.
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\thello" are
    considered to have no common leading whitespace.  (This behaviour is
    new in Python 2.5; older versions of this module incorrectly
    expanded tabs before searching for common leading whitespace.)
    """
    # Blank out whitespace-only lines so they don't affect the margin,
    # then collect the leading run of each remaining line.
    text = _whitespace_only_re.sub('', text)
    prefixes = _leading_whitespace_re.findall(text)

    # The margin is the longest prefix of blanks/tabs common to all lines.
    margin = None
    for prefix in prefixes:
        if margin is None:
            margin = prefix
        elif prefix.startswith(margin):
            # Deeper than the current winner: margin unchanged.
            pass
        elif margin.startswith(prefix):
            # Shallower but consistent: this prefix is the new margin.
            margin = prefix
        else:
            # No common whitespace at all: there is no margin.
            margin = ""
            break

    # sanity check (testing/debugging only)
    if 0 and margin:
        for line in text.split("\n"):
            assert not line or line.startswith(margin), \
                   "line = %r, margin = %r" % (line, margin)

    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
# Manual smoke test for dedent(); this module is normally imported, not run.
if __name__ == "__main__":
    #print dedent("\tfoo\n\tbar")
    #print dedent(" \thello there\n \t how are you?")
    print dedent("Hello there.\n This is indented.")
| mit |
areitz/pants | contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_fetch.py | 2 | 7573 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from collections import defaultdict
from pants.base.address import SyntheticAddress
from pants.base.source_root import SourceRoot
from pants.util.contextutil import temporary_dir
from pants_test.tasks.task_test_base import TaskTestBase
from pants.contrib.go.subsystems.fetchers import ArchiveFetcher, Fetchers
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_fetch import GoFetch
class GoFetchTest(TaskTestBase):
  """Tests for the GoFetch task: remote-import scanning, resolution of
  go_remote_library targets, and transitive fetching of remote libraries
  served from local zip archives via the ArchiveFetcher."""

  # Shorthand used by assertions to turn spec strings into addresses.
  address = SyntheticAddress.parse

  @classmethod
  def task_type(cls):
    # Task under test, as required by TaskTestBase.
    return GoFetch

  def test_get_remote_import_paths(self):
    # Only non-stdlib (dotted-host) imports should be reported as remote.
    go_fetch = self.create_task(self.context())
    self.create_file('src/github.com/u/a/a.go', contents="""
      package a
      import (
        "fmt"
        "math"
        "sync"
        "bitbucket.org/u/b"
        "github.com/u/c"
      )
    """)
    remote_import_ids = go_fetch._get_remote_import_paths('github.com/u/a',
                                                          gopath=self.build_root)
    self.assertItemsEqual(remote_import_ids, ['bitbucket.org/u/b', 'github.com/u/c'])

  def test_resolve_and_inject_explicit(self):
    # A declared 3rdparty target resolves without needing implicit creation.
    # NOTE(review): 'implict_ok' (sic) mirrors GoFetch._resolve's keyword name.
    SourceRoot.register(os.path.join(self.build_root, '3rdparty'), GoRemoteLibrary)
    r1 = self.make_target(spec='3rdparty/r1', target_type=GoRemoteLibrary)
    r2 = self.make_target(spec='3rdparty/r2', target_type=GoRemoteLibrary)
    go_fetch = self.create_task(self.context())
    resolved = go_fetch._resolve(r1, self.address('3rdparty/r2'), 'r2', implict_ok=False)
    self.assertEqual(r2, resolved)

  def test_resolve_and_inject_explicit_failure(self):
    # With implicit creation disallowed, an undeclared dep must raise.
    SourceRoot.register(os.path.join(self.build_root, '3rdparty'), GoRemoteLibrary)
    r1 = self.make_target(spec='3rdparty/r1', target_type=GoRemoteLibrary)
    go_fetch = self.create_task(self.context())
    with self.assertRaises(go_fetch.UndeclaredRemoteLibError) as cm:
      go_fetch._resolve(r1, self.address('3rdparty/r2'), 'r2', implict_ok=False)
    self.assertEqual(cm.exception.address, self.address('3rdparty/r2'))

  def test_resolve_and_inject_implicit(self):
    # With implicit creation allowed, an undeclared dep is synthesized.
    SourceRoot.register(os.path.join(self.build_root, '3rdparty'), GoRemoteLibrary)
    r1 = self.make_target(spec='3rdparty/r1', target_type=GoRemoteLibrary)
    go_fetch = self.create_task(self.context())
    r2 = go_fetch._resolve(r1, self.address('3rdparty/r2'), 'r2', implict_ok=True)
    self.assertEqual(self.address('3rdparty/r2'), r2.address)
    self.assertIsInstance(r2, GoRemoteLibrary)

  def _create_package(self, dirpath, name, deps):
    """Creates a Go package inside dirpath named 'name' importing deps."""
    imports = ['import "localzip/{}"'.format(d) for d in deps]
    f = os.path.join(dirpath, '{name}/{name}.go'.format(name=name))
    self.create_file(f, contents=
      """package {name}
      {imports}
      """.format(name=name, imports='\n'.join(imports)))

  def _create_zip(self, src, dest, name):
    """Zips the Go package in src named 'name' into dest."""
    shutil.make_archive(os.path.join(dest, name), 'zip', root_dir=src)

  def _create_remote_lib(self, name):
    # Declares the BUILD-level go_remote_library for a zipped package.
    self.make_target(spec='3rdparty/localzip/{name}'.format(name=name),
                     target_type=GoRemoteLibrary,
                     pkg=name)

  def _init_dep_graph_files(self, src, zipdir, dep_graph):
    """Given a dependency graph, initializes the corresponding BUILD/packages/zip files.

    Packages are placed in src, and their zipped contents are placed in zipdir.
    """
    for t, deps in dep_graph.items():
      self._create_package(src, t, deps)
      self._create_zip(src, zipdir, t)
      self._create_remote_lib(t)

  def _create_fetch_context(self, zipdir):
    """Given a directory of zipfiles, creates a context for GoFetch."""
    # Route every remote lib through the ArchiveFetcher, serving archives
    # straight from zipdir ('\g<zip>' back-references the matcher group).
    self.set_options_for_scope('fetchers', mapping={r'.*': Fetchers.alias(ArchiveFetcher)})
    matcher = ArchiveFetcher.UrlInfo(url_format=os.path.join(zipdir, '\g<zip>.zip'),
                                     default_rev='HEAD',
                                     strip_level=0)
    self.set_options_for_scope('archive-fetcher', matchers={r'localzip/(?P<zip>[^/]+)': matcher})
    context = self.context()
    context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
    return context

  def _assert_dependency_graph(self, root_target, dep_map):
    """Recursively assert that the dependency graph starting at root_target matches dep_map."""
    if root_target.name not in dep_map:
      return
    expected_spec_paths = set('3rdparty/localzip/{}'.format(name)
                              for name in dep_map[root_target.name])
    actual_spec_paths = set(dep.address.spec_path for dep in root_target.dependencies)
    self.assertEqual(actual_spec_paths, expected_spec_paths)
    # Consume this entry so cycles/diamonds are not re-asserted.
    dep_map = dep_map.copy()
    del dep_map[root_target.name]
    for dep in root_target.dependencies:
      self._assert_dependency_graph(dep, dep_map)

  def test_transitive_download_remote_libs_simple(self):
    # Linear chain r1 -> r2 -> r3; all deps declared, none undeclared.
    with temporary_dir() as src:
      with temporary_dir() as zipdir:
        SourceRoot.register('3rdparty', GoRemoteLibrary)
        dep_graph = {
          'r1': ['r2'],
          'r2': ['r3'],
          'r3': []
        }
        self._init_dep_graph_files(src, zipdir, dep_graph)
        r1 = self.target('3rdparty/localzip/r1')
        context = self._create_fetch_context(zipdir)
        go_fetch = self.create_task(context)
        undeclared_deps = go_fetch._transitive_download_remote_libs({r1})
        self.assertEqual(undeclared_deps, {})
        self._assert_dependency_graph(r1, dep_graph)

  def test_transitive_download_remote_libs_complex(self):
    # Diamond-shaped graph with two roots sharing transitive deps.
    with temporary_dir() as src:
      with temporary_dir() as zipdir:
        SourceRoot.register('3rdparty', GoRemoteLibrary)
        dep_graph = {
          'r1': ['r3', 'r4'],
          'r2': ['r3'],
          'r3': ['r4'],
          'r4': []
        }
        self._init_dep_graph_files(src, zipdir, dep_graph)
        r1 = self.target('3rdparty/localzip/r1')
        r2 = self.target('3rdparty/localzip/r2')
        context = self._create_fetch_context(zipdir)
        go_fetch = self.create_task(context)
        undeclared_deps = go_fetch._transitive_download_remote_libs({r1, r2})
        self.assertEqual(undeclared_deps, {})
        self._assert_dependency_graph(r1, dep_graph)
        self._assert_dependency_graph(r2, dep_graph)

  def test_transitive_download_remote_libs_undeclared_deps(self):
    # r3 (from r1) and r4 (from r2) have no BUILD targets: both must be
    # reported as undeclared rather than silently fetched.
    with temporary_dir() as src:
      with temporary_dir() as zipdir:
        SourceRoot.register('3rdparty', GoRemoteLibrary)
        dep_graph = {
          'r1': ['r2', 'r3'],
          'r2': ['r4']
        }
        self._init_dep_graph_files(src, zipdir, dep_graph)
        r1 = self.target('3rdparty/localzip/r1')
        r2 = self.target('3rdparty/localzip/r2')
        context = self._create_fetch_context(zipdir)
        go_fetch = self.create_task(context)
        undeclared_deps = go_fetch._transitive_download_remote_libs({r1})
        expected = defaultdict(set)
        expected[r1] = {('localzip/r3', self.address('3rdparty/localzip/r3'))}
        expected[r2] = {('localzip/r4', self.address('3rdparty/localzip/r4'))}
        self.assertEqual(undeclared_deps, expected)
| apache-2.0 |
ovnicraft/openerp-restaurant | l10n_br/account.py | 39 | 10574 | # -*- encoding: utf-8 -*-
#################################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#################################################################################
import openerp
from openerp.osv import fields, osv
TAX_CODE_COLUMNS = {
'domain':fields.char('Domain', size=32,
help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'tax_discount': fields.boolean('Discount this Tax in Prince',
help="Mark it for (ICMS, PIS, COFINS and others taxes included)."),
}
TAX_DEFAULTS = {
'base_reduction': 0,
'amount_mva': 0,
}
class account_tax_code_template(osv.osv):
    """ Add fields used to define some brazilian taxes """
    _inherit = 'account.tax.code.template'
    _columns = TAX_CODE_COLUMNS

    def generate_tax_code(self, cr, uid, tax_code_root_id, company_id,
                          context=None):
        """This function generates the tax codes from the templates of tax
        code that are children of the given one passed in argument. Then it
        returns a dictionary with the mappping between the templates and the
        real objects.

        Overridden from the base module so the Brazilian-specific 'domain'
        and 'tax_discount' values are copied onto the generated tax codes.

        :param tax_code_root_id: id of the root of all the tax code templates
                                 to process.
        :param company_id: id of the company the wizard is running for
        :returns: dictionary with the mappping between the templates and the
                  real objects.
        :rtype: dict
        """
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_tax_code = self.pool.get('account.tax.code')
        tax_code_template_ref = {}
        company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)

        #find all the children of the tax_code_root_id
        children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
        for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
            # Map the template's parent onto the already-generated real code
            # (templates are processed parents-first thanks to order='id').
            parent_id = tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False
            vals = {
                # The root code takes the company name instead of the template name.
                'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
                'code': tax_code_template.code,
                'info': tax_code_template.info,
                'parent_id': parent_id,
                'company_id': company_id,
                'sign': tax_code_template.sign,
                'domain': tax_code_template.domain,
                'tax_discount': tax_code_template.tax_discount,
            }
            #check if this tax code already exists
            rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),
                                                     ('parent_id','=',parent_id),
                                                     ('code', '=', vals['code']),
                                                     ('company_id', '=', vals['company_id'])], context=context)
            if not rec_list:
                #if not yet, create it
                new_tax_code = obj_tax_code.create(cr, uid, vals)
                #recording the new tax code to do the mapping
                tax_code_template_ref[tax_code_template.id] = new_tax_code
        return tax_code_template_ref
class account_tax_code(osv.osv):
    """ Add fields used to define some brazilian taxes """
    # Mirrors the columns added on the template model so generated codes
    # carry the same Brazilian-specific fields.
    _inherit = 'account.tax.code'
    _columns = TAX_CODE_COLUMNS
def get_precision_tax():
    """Return a digits_compute callable for tax fields.

    The returned function looks up the configured 'Account' decimal
    precision for the current database and grants two extra decimal
    digits on top of it, with a fixed total width of 16.
    """
    def change_digit_tax(cr):
        dp_model = openerp.registry(cr.dbname)['decimal.precision']
        digits = dp_model.precision_get(cr, 1, 'Account')
        return (16, digits + 2)
    return change_digit_tax
class account_tax_template(osv.osv):
    """ Add fields used to define some brazilian taxes """
    _inherit = 'account.tax.template'
    _columns = {
        'tax_discount': fields.boolean('Discount this Tax in Prince',
                                       help="Mark it for (ICMS, PIS e etc.)."),
        # NOTE(review): 'Redution' (sic) is a user-visible label typo; left
        # unchanged here because fixing it alters the UI string/translations.
        'base_reduction': fields.float('Redution', required=True,
                                       digits_compute=get_precision_tax(),
                                       help="Um percentual decimal em % entre 0-1."),
        'amount_mva': fields.float('MVA Percent', required=True,
                                   digits_compute=get_precision_tax(),
                                   help="Um percentual decimal em % entre 0-1."),
        'type': fields.selection([('percent','Percentage'),
                                  ('fixed','Fixed Amount'),
                                  ('none','None'),
                                  ('code','Python Code'),
                                  ('balance','Balance'),
                                  ('quantity','Quantity')], 'Tax Type', required=True,
                                 help="The computation method for the tax amount."),
    }
    _defaults = TAX_DEFAULTS

    def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
        """
        This method generate taxes from templates.

        Extends the base implementation to copy the Brazilian 'domain' and
        'tax_discount' values from each template's tax code onto the
        generated account.tax records.

        :param tax_templates: list of browse record of the tax templates to process
        :param tax_code_template_ref: Taxcode templates reference.
        :param company_id: id of the company the wizard is running for
        :returns:
            {
            'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
            'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
            }
        """
        result = super(account_tax_template, self)._generate_tax(cr, uid,
                                                                 tax_templates,
                                                                 tax_code_template_ref,
                                                                 company_id,
                                                                 context)
        tax_templates = self.browse(cr, uid, result['tax_template_to_tax'].keys(), context)
        obj_acc_tax = self.pool.get('account.tax')
        for tax_template in tax_templates:
            if tax_template.tax_code_id:
                obj_acc_tax.write(cr, uid, result['tax_template_to_tax'][tax_template.id], {'domain': tax_template.tax_code_id.domain,
                                                                                            'tax_discount': tax_template.tax_code_id.tax_discount})
        return result

    def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
        # UI onchange: mirror the selected tax code template's discount flag
        # and domain onto the tax template form.
        result = {'value': {}}
        if not tax_code_id:
            return result
        obj_tax_code = self.pool.get('account.tax.code.template').browse(cr, uid, tax_code_id)
        if obj_tax_code:
            result['value']['tax_discount'] = obj_tax_code.tax_discount
            result['value']['domain'] = obj_tax_code.domain
        return result
class account_tax(osv.osv):
    """ Add fields used to define some brazilian taxes """
    # Mirrors account_tax_template above so real taxes expose the same
    # Brazilian-specific columns and defaults.
    _inherit = 'account.tax'
    _columns = {
        'tax_discount': fields.boolean('Discount this Tax in Prince',
                                       help="Mark it for (ICMS, PIS e etc.)."),
        # NOTE(review): 'Redution' (sic) is a user-visible label typo; left
        # unchanged here because fixing it alters the UI string/translations.
        'base_reduction': fields.float('Redution', required=True,
                                       digits_compute=get_precision_tax(),
                                       help="Um percentual decimal em % entre 0-1."),
        'amount_mva': fields.float('MVA Percent', required=True,
                                   digits_compute=get_precision_tax(),
                                   help="Um percentual decimal em % entre 0-1."),
        'type': fields.selection([('percent','Percentage'),
                                  ('fixed','Fixed Amount'),
                                  ('none','None'),
                                  ('code','Python Code'),
                                  ('balance','Balance'),
                                  ('quantity','Quantity')], 'Tax Type', required=True,
                                 help="The computation method for the tax amount."),
    }
    _defaults = TAX_DEFAULTS

    def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
        # UI onchange: mirror the selected tax code's discount flag and
        # domain onto the tax form (same logic as the template model, but
        # reading from the real 'account.tax.code' records).
        result = {'value': {}}
        if not tax_code_id:
            return result
        obj_tax_code = self.pool.get('account.tax.code').browse(cr, uid, tax_code_id)
        if obj_tax_code:
            result['value']['tax_discount'] = obj_tax_code.tax_discount
            result['value']['domain'] = obj_tax_code.domain
        return result
| agpl-3.0 |
encbladexp/ansible | hacking/return_skeleton_generator.py | 35 | 3330 | #!/usr/bin/env python
# (c) 2017, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# return_skeleton_generator.py takes JSON output from a module and
# and creates a starting point for the RETURNS section of a module.
# This can be provided as stdin or a file argument
#
# The easiest way to obtain the JSON output is to use hacking/test-module.py
#
# You will likely want to adjust this to remove sensitive data or
# ensure the `returns` value is correct, and to write a useful description
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import OrderedDict
import json
import sys
import yaml
# Allow OrderedDicts to be used when dumping YAML
# https://stackoverflow.com/a/16782282/3538079
def represent_ordereddict(dumper, data):
    """PyYAML representer that emits an OrderedDict as a plain YAML mapping,
    preserving insertion order (see https://stackoverflow.com/a/16782282)."""
    pairs = []
    for item_key, item_value in data.items():
        pairs.append((dumper.represent_data(item_key),
                      dumper.represent_data(item_value)))
    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', pairs)
def get_return_data(key, value):
    """Build the RETURNS-skeleton entry for a single key/value pair.

    Returns a one-key dict {key: OrderedDict} so the summary fields
    (description/returned/type) stay ahead of any nested 'contains'.
    Dicts and lists-of-dicts recurse via get_all_items.
    """
    info = OrderedDict()
    info['description'] = "FIXME *** add description for %s" % key
    info['returned'] = "always"
    if isinstance(value, dict):
        info['type'] = 'complex'
        info['contains'] = get_all_items(value)
    elif isinstance(value, list) and value and isinstance(value[0], dict):
        # A non-empty list of dicts is documented via its first element.
        info['type'] = 'complex'
        info['contains'] = get_all_items(value[0])
    else:
        info['type'] = type(value).__name__
        info['sample'] = value
        # Python 2 'unicode' values should be documented as 'str'.
        if info['type'] == 'unicode':
            info['type'] = 'str'
    return {key: info}
def get_all_items(data):
    """Return an OrderedDict of RETURNS entries for every key in *data*,
    sorted by key name.

    Fixed for Python 3: the original did ``sorted([...])`` over one-key
    dicts (dicts are not orderable on py3) and ``item.items()[0]`` (views
    are not subscriptable on py3).  Sorting by each entry's single key
    preserves the intended key-ordered output on both versions.
    """
    entries = sorted((get_return_data(key, value) for key, value in data.items()),
                     key=lambda entry: next(iter(entry)))
    result = OrderedDict()
    for entry in entries:
        key = next(iter(entry))
        result[key] = entry[key]
    return result
def main(args):
    """Read a module's JSON output (from the file named in *args*, or stdin)
    and print a starting-point RETURNS skeleton as YAML.

    :param args: CLI arguments; args[0], if present, is a JSON file path.
    """
    yaml.representer.SafeRepresenter.add_representer(OrderedDict, represent_ordereddict)

    # Fix: the original opened the file without ever closing it; use a
    # context manager for the file case and read stdin directly otherwise.
    if args:
        with open(args[0]) as src:
            data = json.load(src, strict=False)
    else:
        data = json.load(sys.stdin, strict=False)

    docs = get_all_items(data)
    # 'invocation' is runtime metadata, not a documentable return value.
    docs.pop('invocation', None)
    print(yaml.safe_dump(docs, default_flow_style=False))
# Script entry point: forward CLI args (an optional JSON file path) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| gpl-3.0 |
joachimwolff/bioconda-recipes | recipes/biopet-fastqsplitter/0.1/biopet-fastqsplitter.py | 82 | 3377 | #!/usr/bin/env python
#
# Wrapper script for starting the biopet-fastqsplitter JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the bundled assembly JAR, resolved relative to this wrapper's
# directory (or --exec_dir) at launch time.
jar_file = 'FastqSplitter-assembly-0.1.jar'
# JVM memory flags applied when the caller supplies no -Xm* option and
# _JAVA_OPTIONS is unset.
default_jvm_mem_opts = []

# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory containing *path*."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the Java interpreter to run: $JAVA_HOME/bin/java when it exists
    and is executable, otherwise plain 'java' from the PATH."""
    java_home = getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, os.path.join('bin', 'java'))
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    Returns a 4-tuple of:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where -Xm* flags become memory options, -D*/-XX* flags become property
    options, --exec_dir=DIR selects (and populates, if missing) the
    directory holding the distribution, and everything else passes through.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use of this exec_dir: copy the distribution there.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # Respect _JAVA_OPTIONS: fall back to the wrapper defaults only when the
    # caller gave no -Xm* flag AND _JAVA_OPTIONS is unset.  A null (empty)
    # _JAVA_OPTIONS value deliberately counts as "set".
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Launch the bundled JAR under the JVM.

    The JAR is located next to this wrapper, or inside --exec_dir="dir"
    when given (useful in multiuser settings; jvm_opts() copies the
    distribution there on first use).
    """
    java = java_executable()
    mem_opts, prop_opts, pass_args, exec_dir = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A fully qualified class name ('eu....') as the first passthrough arg
    # means "run that class from the classpath" instead of the jar's
    # default entry point.
    if pass_args and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)

    command = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args
    sys.exit(subprocess.call(command))
# Script entry point; all argument handling happens inside main()/jvm_opts().
if __name__ == '__main__':
    main()
| mit |
salamer/django | tests/validators/tests.py | 163 | 16552 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import types
from datetime import datetime, timedelta
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
int_list_validator, validate_comma_separated_integer_list, validate_email,
validate_integer, validate_ipv4_address, validate_ipv6_address,
validate_ipv46_address, validate_slug, validate_unicode_slug,
)
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils._os import upath
# Reference timestamp for the Min/MaxValueValidator datetime rows in TEST_DATA.
NOW = datetime.now()
# Extra URL schemes; presumably passed as URLValidator(schemes=...) in test
# rows beyond this chunk — confirm against the rest of TEST_DATA.
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = [
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, ValidationError),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_integer, '\n42', ValidationError),
(validate_integer, '42\n', ValidationError),
(validate_email, 'email@here.com', None),
(validate_email, 'weirder-email@here.and.there.com', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, 'example@valid-----hyphens.com', None),
(validate_email, 'example@valid-with-hyphens.com', None),
(validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, 'example@atm.%s' % ('a' * 63), None),
(validate_email, 'example@%s.atm' % ('a' * 63), None),
(validate_email, 'example@%s.%s.atm' % ('a' * 63, 'b' * 10), None),
(validate_email, 'example@atm.%s' % ('a' * 64), ValidationError),
(validate_email, 'example@%s.atm.%s' % ('b' * 64, 'a' * 63), ValidationError),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, 'abc@.com', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, 'email@127.0.0.1', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, 'example@invalid-.com', ValidationError),
(validate_email, 'example@-invalid.com', ValidationError),
(validate_email, 'example@invalid.com-', ValidationError),
(validate_email, 'example@inv-.alid-.com', ValidationError),
(validate_email, 'example@inv-.-alid.com', ValidationError),
(validate_email, 'test@example.com\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, 'trailingdot@shouldfail.com.', ValidationError),
# Max length of domain name labels is 63 characters per RFC 1034.
(validate_email, 'a@%s.us' % ('a' * 63), None),
(validate_email, 'a@%s.us' % ('a' * 64), ValidationError),
# Trailing newlines in username or domain not allowed
(validate_email, 'a@b.com\n', ValidationError),
(validate_email, 'a\n@b.com', ValidationError),
(validate_email, '"test@test"\n@example.com', ValidationError),
(validate_email, 'a@[127.0.0.1]\n', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, 'a', None),
(validate_slug, '1', None),
(validate_slug, 'a1', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, 'some@mail.com', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '你 好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_slug, 'trailing-newline\n', ValidationError),
(validate_unicode_slug, 'slug-ok', None),
(validate_unicode_slug, 'longer-slug-still-ok', None),
(validate_unicode_slug, '--------', None),
(validate_unicode_slug, 'nohyphensoranything', None),
(validate_unicode_slug, 'a', None),
(validate_unicode_slug, '1', None),
(validate_unicode_slug, 'a1', None),
(validate_unicode_slug, '你好', None),
(validate_unicode_slug, '', ValidationError),
(validate_unicode_slug, ' text ', ValidationError),
(validate_unicode_slug, ' ', ValidationError),
(validate_unicode_slug, 'some@mail.com', ValidationError),
(validate_unicode_slug, '\n', ValidationError),
(validate_unicode_slug, '你 好', ValidationError),
(validate_unicode_slug, 'trailing-newline\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
(validate_ipv4_address, '1.1.1.1\n', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '12', None),
(validate_comma_separated_integer_list, '1,2', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '10,32', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(validate_comma_separated_integer_list, ',', ValidationError),
(validate_comma_separated_integer_list, '1,2,3,', ValidationError),
(validate_comma_separated_integer_list, '1,2,', ValidationError),
(validate_comma_separated_integer_list, ',1', ValidationError),
(validate_comma_separated_integer_list, '1,,2', ValidationError),
(int_list_validator(sep='.'), '1.2.3', None),
(int_list_validator(sep='.'), '1,2,3', ValidationError),
(int_list_validator(sep='.'), '1.2.3\n', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
# Trailing newlines not accepted
(URLValidator(), 'http://www.djangoproject.com/\n', ValidationError),
(URLValidator(), 'http://[::ffff:192.9.5.5]\n', ValidationError),
# Trailing junk does not take forever to reject
(URLValidator(), 'http://www.asdasdasdasdsadfm.com.br ', ValidationError),
(URLValidator(), 'http://www.asdasdasdasdsadfm.com.br z', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
]
def create_path(filename):
    """Return the absolute path of *filename* relative to this test module."""
    base_dir = os.path.dirname(upath(__file__))
    return os.path.abspath(os.path.join(base_dir, filename))
# Add valid and invalid URL tests.
# This only tests the validator without extended schemes.
for url_filename, expected_error in (('valid_urls.txt', None),
                                     ('invalid_urls.txt', ValidationError)):
    with io.open(create_path(url_filename), encoding='utf8') as url_file:
        for line in url_file:
            TEST_DATA.append((URLValidator(), line.strip(), expected_error))
def create_simple_test_method(validator, expected, value, num):
    """Build a (test_name, test_function) pair for one TEST_DATA row.

    When *expected* is an exception class the generated test asserts that
    validating *value* raises it; otherwise it asserts the validator returns
    *expected* without raising ValidationError.
    """
    expects_error = expected is not None and issubclass(expected, Exception)

    if expects_error:
        name_mask = 'test_%s_raises_error_%d'

        def test_func(self):
            # assertRaises is deliberately avoided so the failure message can
            # include the value under test.
            try:
                validator(value)
            except expected:
                pass
            else:
                self.fail("%s not raised when validating '%s'" % (
                    expected.__name__, value))
    else:
        name_mask = 'test_%s_%d'

        def test_func(self):
            try:
                self.assertEqual(expected, validator(value))
            except ValidationError as e:
                self.fail("Validation of '%s' failed. Error message was: %s" % (
                    value, str(e)))

    # Plain functions are named by __name__; validator instances by class.
    if isinstance(validator, types.FunctionType):
        validator_name = validator.__name__
    else:
        validator_name = validator.__class__.__name__
    return name_mask % (validator_name, num), test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(SimpleTestCase):
    """Rendering of ValidationError plus a few validator-option checks."""

    def test_single_message(self):
        error = ValidationError('Not Valid')
        self.assertEqual(str(error), str_prefix("[%(_)s'Not Valid']"))
        self.assertEqual(repr(error), str_prefix("ValidationError([%(_)s'Not Valid'])"))

    def test_message_list(self):
        error = ValidationError(['First Problem', 'Second Problem'])
        self.assertEqual(str(error), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
        self.assertEqual(repr(error), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))

    def test_message_dict(self):
        error = ValidationError({'first': ['First Problem']})
        self.assertEqual(str(error), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
        self.assertEqual(repr(error), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))

    def test_regex_validator_flags(self):
        # Supplying flags together with a pre-compiled regex must be rejected.
        try:
            RegexValidator(re.compile('a'), flags=re.IGNORECASE)
        except TypeError:
            pass
        else:
            self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")

    def test_max_length_validator_message(self):
        validator = MaxLengthValidator(16, message='"%(value)s" has more than %(limit_value)d characters.')
        with self.assertRaisesMessage(ValidationError, '"djangoproject.com" has more than 16 characters.'):
            validator('djangoproject.com')
# Attach one generated test method per TEST_DATA row to the class above.
test_counter = 0
for validator, value, expected in TEST_DATA:
    test_name, test_method = create_simple_test_method(validator, expected, value, test_counter)
    setattr(TestSimpleValidators, test_name, test_method)
    test_counter += 1
class TestValidatorEquality(TestCase):
    """
    Tests that validators have valid equality operators (#21638)
    """

    def test_regex_equality(self):
        pattern = r'^(?:[a-z0-9\.\-]*)://'
        other_pattern = r'^(?:[0-9\.\-]*)://'
        self.assertEqual(RegexValidator(pattern), RegexValidator(pattern))
        self.assertNotEqual(RegexValidator(pattern), RegexValidator(other_pattern))
        # Message and code participate in equality as well.
        self.assertEqual(
            RegexValidator(pattern, "oh noes", "invalid"),
            RegexValidator(pattern, "oh noes", "invalid"),
        )
        self.assertNotEqual(
            RegexValidator(pattern, "oh", "invalid"),
            RegexValidator(pattern, "oh noes", "invalid"),
        )
        self.assertNotEqual(
            RegexValidator(pattern, "oh noes", "invalid"),
            RegexValidator(pattern),
        )
        # So do flags and inverse_match.
        self.assertNotEqual(
            RegexValidator('', flags=re.IGNORECASE),
            RegexValidator(''),
        )
        self.assertNotEqual(
            RegexValidator(''),
            RegexValidator('', inverse_match=True),
        )

    def test_regex_equality_nocache(self):
        pattern = r'^(?:[a-z0-9\.\-]*)://'
        left = RegexValidator(pattern)
        # Drop the compiled-pattern cache so both sides compile independently.
        re.purge()
        right = RegexValidator(pattern)
        self.assertEqual(left, right)

    def test_regex_equality_blank(self):
        self.assertEqual(RegexValidator(), RegexValidator())

    def test_email_equality(self):
        self.assertEqual(EmailValidator(), EmailValidator())
        self.assertNotEqual(EmailValidator(message="BAD EMAIL"), EmailValidator())
        self.assertEqual(
            EmailValidator(message="BAD EMAIL", code="bad"),
            EmailValidator(message="BAD EMAIL", code="bad"),
        )

    def test_basic_equality(self):
        self.assertEqual(MaxValueValidator(44), MaxValueValidator(44))
        self.assertNotEqual(MaxValueValidator(44), MinValueValidator(44))
        self.assertNotEqual(MinValueValidator(45), MinValueValidator(11))
| bsd-3-clause |
robotpilot/crazyflie-clients-python | lib/cflib/crtp/__init__.py | 6 | 2956 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Scans and creates communication interfaces."""
__author__ = 'Bitcraze AB'
__all__ = []
import logging
logger = logging.getLogger(__name__)
from .radiodriver import RadioDriver
from .udpdriver import UdpDriver
from .serialdriver import SerialDriver
from .debugdriver import DebugDriver
from .usbdriver import UsbDriver
from .exceptions import WrongUriType
DRIVERS = [RadioDriver, SerialDriver, UdpDriver, DebugDriver, UsbDriver]
INSTANCES = []
def init_drivers(enable_debug_driver=False):
    """Initialize all the drivers."""
    for driver_class in DRIVERS:
        # The debug driver is only instantiated when explicitly requested.
        if driver_class is DebugDriver and not enable_debug_driver:
            continue
        try:
            INSTANCES.append(driver_class())
        except Exception:  # pylint: disable=W0703
            # Best-effort: a driver that fails to initialize is skipped.
            continue
def scan_interfaces(address=None):
    """Scan all the interfaces for available Crazyflies.

    :param address: optional address forwarded to each driver's
        ``scan_interface`` to narrow the scan.
    :return: list of all interfaces found by the initialized drivers.
    """
    # The previous version wrapped each scan in a ``try/except Exception:
    # raise`` block, which is a no-op, and initialized an unused ``found``
    # list; both have been removed. Driver errors still propagate unchanged.
    available = []
    for instance in INSTANCES:
        logger.debug("Scanning: %s", instance)
        available += instance.scan_interface(address)
    return available
def get_interfaces_status():
    """Get the status of all the interfaces.

    :return: dict mapping each driver's name to its status string.
    """
    # The previous version wrapped each call in a ``try/except Exception:
    # raise`` block, which is a no-op and has been removed; errors from a
    # driver still propagate to the caller.
    return {instance.get_name(): instance.get_status()
            for instance in INSTANCES}
def get_link_driver(uri, link_quality_callback=None, link_error_callback=None):
    """Return the link driver for the given URI. Returns None if no driver
    was found for the URI or the URI was not well formatted for the matching
    driver."""
    for candidate in INSTANCES:
        try:
            candidate.connect(uri, link_quality_callback, link_error_callback)
        except WrongUriType:
            # This driver does not handle URIs of this shape; try the next.
            continue
        return candidate
    return None
| gpl-2.0 |
jalourenco/wagtaildemo | demo/templatetags/demo_tags.py | 1 | 5358 | from datetime import date
from django import template
from django.conf import settings
from demo.models import *
register = template.Library()
# settings value
@register.assignment_tag
def get_googe_maps_key():
    # Returns GOOGLE_MAPS_KEY from settings, or '' when unset.
    # NOTE(review): the tag name contains a typo ("googe"); renaming would
    # break templates that already reference it, so it is left as-is.
    return getattr(settings, 'GOOGLE_MAPS_KEY', "")
@register.assignment_tag(takes_context=True)
def get_site_root(context):
    # Root page of the site serving the current request.
    # NB this returns a core.Page, not the implementation-specific model used
    # so object-comparison to self will return false as objects would differ
    return context['request'].site.root_page
def has_menu_children(page):
    """Return True if *page* has any live children shown in menus.

    Used to decide whether a menu item needs a Bootstrap dropdown.
    """
    # .exists() issues an EXISTS query instead of fetching rows just to test
    # queryset truthiness; the boolean result is identical.
    return page.get_children().filter(live=True, show_in_menus=True).exists()
# Retrieves the top menu items - the immediate children of the parent page
# The has_menu_children method is necessary because the bootstrap menu requires
# a dropdown class to be applied to a parent
@register.inclusion_tag('demo/tags/top_menu.html', takes_context=True)
def top_menu(context, parent, calling_page=None):
    """Render the top-level menu: live, menu-visible children of *parent*."""
    menuitems = parent.get_children().filter(live=True, show_in_menus=True)
    for menuitem in menuitems:
        # Bootstrap needs a dropdown class on items that have sub-menus.
        menuitem.show_dropdown = has_menu_children(menuitem)
    return {
        'calling_page': calling_page,
        'menuitems': menuitems,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Retrieves the children of the top menu items for the drop downs
@register.inclusion_tag('demo/tags/top_menu_children.html', takes_context=True)
def top_menu_children(context, parent):
    """Render the dropdown entries: live, menu-visible children of *parent*."""
    children = parent.get_children().filter(live=True, show_in_menus=True)
    return {
        'parent': parent,
        'menuitems_children': children,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Retrieves the secondary links for the 'also in this section' links
# - either the children or siblings of the current page
@register.inclusion_tag('demo/tags/secondary_menu.html', takes_context=True)
def secondary_menu(context, calling_page=None):
    """Render the secondary menu for *calling_page*.

    Uses the page's live, menu-visible children; falls back to its siblings
    when there are none.
    """
    pages = []
    if calling_page:
        pages = calling_page.get_children().filter(
            live=True,
            show_in_menus=True
        )
        # Idiomatic emptiness test instead of len(pages) == 0; both force
        # queryset evaluation with the same result.
        if not pages:
            pages = calling_page.get_siblings(inclusive=False).filter(
                live=True,
                show_in_menus=True
            )
    return {
        'pages': pages,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Retrieves all live pages which are children of the calling page
#for standard index listing
@register.inclusion_tag(
    'demo/tags/standard_index_listing.html',
    takes_context=True
)
def standard_index_listing(context, calling_page):
    """Render all live children of *calling_page* as a standard index."""
    live_children = calling_page.get_children().filter(live=True)
    return {
        'pages': live_children,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Person feed for home page
@register.inclusion_tag(
    'demo/tags/person_listing_homepage.html',
    takes_context=True
)
def person_listing_homepage(context, count=2):
    """Render *count* randomly-ordered live PersonPages."""
    random_people = PersonPage.objects.filter(live=True).order_by('?')
    return {
        'people': random_people[:count],
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Blog feed for home page
@register.inclusion_tag(
    'demo/tags/blog_listing_homepage.html',
    takes_context=True
)
def blog_listing_homepage(context, count=2):
    """Render the *count* most recent live BlogPages."""
    recent_blogs = BlogPage.objects.filter(live=True).order_by('-date')
    return {
        'blogs': recent_blogs[:count],
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Events feed for home page
@register.inclusion_tag(
    'demo/tags/event_listing_homepage.html',
    takes_context=True
)
def event_listing_homepage(context, count=2):
    """Render the next *count* live EventPages starting today or later."""
    upcoming = (EventPage.objects.filter(live=True)
                .filter(date_from__gte=date.today())
                .order_by('date_from'))
    return {
        'events': upcoming[:count],
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Advert snippets
@register.inclusion_tag('demo/tags/adverts.html', takes_context=True)
def adverts(context):
    # Render every Advert snippet; 'request' is required by the pageurl tag
    # used inside the template.
    return {
        'adverts': Advert.objects.all(),
        'request': context['request'],
    }
# Format times e.g. on event page
@register.filter
def time_display(time):
    """Format a time object in 12-hour style, e.g. '3.30pm' or '12am'.

    Minutes are omitted when zero and not zero-padded (9:05 -> '9.5am'),
    matching the original behaviour.
    """
    am_pm = "pm" if time.hour >= 12 else "am"
    # Map 0 -> 12 (midnight) and 12 -> 12 (noon); everything else mod 12.
    hour_12 = time.hour % 12 or 12
    minute_part = "." + str(time.minute) if time.minute else ""
    return "%d%s%s" % (hour_12, minute_part, am_pm)
| bsd-3-clause |
lucywyman/slides-ii | v/lib/python2.7/site-packages/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
# platform.python_implementation() is missing on some interpreters, so fall
# back to a hand-rolled version on Jython and fail loudly elsewhere.
try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise
# restricted set of variables
# These are the only names a PEP 345 marker may reference; evaluated once at
# import time.
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None  # wheel extension
         }

# Also accept underscore spellings, e.g. 'os_name' as an alias for 'os.name'.
for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
    """Return a copy of the default PEP 345 globals dictionary."""
    return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
    """Transformer that rejects any AST node not allowed in a marker."""

    # Node types that may appear in a marker expression.
    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)

    def __init__(self, statement):
        # Original marker text, kept only so error messages can point at it.
        self.statement = statement

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if not isinstance(node, self.ALLOWED):
            pointer = (' ' * node.col_offset) + '^'
            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                              (self.statement, pointer))
        return ast.NodeTransformer.visit(self, node)

    def visit_Attribute(self, node):
        """Flatten ``a.b`` into a single Name node called ``"a.b"``."""
        flattened = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(flattened, node)
def parse_marker(marker):
    """Parse *marker* into an eval-mode AST, rejecting disallowed nodes."""
    raw_tree = ast.parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(raw_tree)
def compile_marker(parsed_marker):
    """Compile a whitelisted marker AST into an eval-able code object."""
    code = _builtin_compile(parsed_marker, '<environment marker>', 'eval',
                            dont_inherit=True)
    return code
# Weak cache: compiled marker functions are dropped once no caller holds them.
_cache = weakref.WeakValueDictionary()

def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty/blank marker always evaluates to True.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            return eval(compiled_marker, environment)
    # Expose the marker text as the docstring, then memoise the function
    # (marker_fn local keeps it alive until we return it).
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile *marker* and evaluate it against *environment*."""
    marker_fn = compile(marker)
    return marker_fn(environment)
| apache-2.0 |
MoritzS/django | django/core/checks/templates.py | 49 | 1185 | import copy
from django.conf import settings
from . import Error, Tags, register
# Raised when a TEMPLATES entry sets APP_DIRS while also specifying a custom
# 'loaders' option; the two are mutually exclusive.
E001 = Error(
    "You have 'APP_DIRS': True in your TEMPLATES but also specify 'loaders' "
    "in OPTIONS. Either remove APP_DIRS or remove the 'loaders' option.",
    id='templates.E001',
)
# Raised when 'string_if_invalid' is not a string; the {} placeholders are
# filled with the offending value and its type name at check time.
E002 = Error(
    "'string_if_invalid' in TEMPLATES OPTIONS must be a string but got: {} ({}).",
    id="templates.E002",
)
@register(Tags.templates)
def check_setting_app_dirs_loaders(app_configs, **kwargs):
    """Flag TEMPLATES entries combining APP_DIRS with a 'loaders' option."""
    conflicting = any(
        conf.get('APP_DIRS') and 'loaders' in conf.get('OPTIONS', {})
        for conf in settings.TEMPLATES
    )
    return [E001] if conflicting else []
@register(Tags.templates)
def check_string_if_invalid_is_string(app_configs, **kwargs):
    """Flag TEMPLATES entries whose 'string_if_invalid' is not a str."""
    errors = []
    for conf in settings.TEMPLATES:
        value = conf.get('OPTIONS', {}).get('string_if_invalid', '')
        if isinstance(value, str):
            continue
        # Copy the template error so the shared E002 message stays pristine.
        error = copy.copy(E002)
        error.msg = error.msg.format(value, type(value).__name__)
        errors.append(error)
    return errors
| bsd-3-clause |
jigarkb/Programming | LeetCode/238-M-ProductOfArrayExceptSelf.py | 2 | 1365 | # Given an array of n integers where n > 1, nums, return an array output such that output[i] is equal to the product
# of all the elements of nums except nums[i].
#
# Solve it without division and in O(n).
#
# For example, given [1,2,3,4], return [24,12,8,6].
#
# Follow up:
# Could you solve it with constant space complexity? (Note: The output array does not count as extra space for the
# purpose of space complexity analysis.)
class Solution(object):
    def productExceptSelf(self, nums):
        """Return a list where entry i is the product of all nums except nums[i].

        Two passes, O(n) time, no division: a forward pass accumulates
        prefix products, a backward pass multiplies in suffix products.

        :type nums: List[int]
        :rtype: List[int]
        """
        # Forward pass: output[i] = product of nums[0..i-1].
        output = [1]
        for value in nums[:-1]:
            output.append(output[-1] * value)
        # Backward pass: fold in the product of nums[i+1..n-1].
        suffix = 1
        for i in reversed(range(len(nums))):
            output[i] *= suffix
            suffix *= nums[i]
        return output
# Note
# Given numbers [2, 3, 4, 5], regarding the third number 4, the product of array except 4 is 2*3*5 which consists of
# two parts: left 2*3 and right 5. The product is left*right. We can get lefts and rights:
#
# Numbers: 2 3 4 5
# Lefts: 2 2*3 2*3*4
# Rights: 3*4*5 4*5 5
#
# Let's fill the empty with 1:
# Numbers: 2 3 4 5
# Lefts: 1 2 2*3 2*3*4
# Rights: 3*4*5 4*5 5 1
#
# We can calculate lefts and rights in 2 loops. The time complexity is O(n).
| mit |
izgzhen/servo | tests/wpt/web-platform-tests/tools/py/testing/path/test_svnurl.py | 218 | 3524 | import py
from py._path.svnurl import InfoSvnCommand
import datetime
import time
from svntestbase import CommonSvnTests
def pytest_funcarg__path1(request):
    """Provide an svnurl path pointing at the repowc1 test repository."""
    _, repourl, _ = request.getfuncargvalue("repowc1")
    return py.path.svnurl(repourl)
class TestSvnURLCommandPath(CommonSvnTests):
    """Run the common SVN path tests against svnurl (remote URL) paths."""

    @py.test.mark.xfail
    def test_load(self, path1):
        super(TestSvnURLCommandPath, self).test_load(path1)

    # the following two work on jython but not in local/svnwc
    def test_listdir(self, path1):
        super(TestSvnURLCommandPath, self).test_listdir(path1)

    def test_visit_ignore(self, path1):
        super(TestSvnURLCommandPath, self).test_visit_ignore(path1)

    # NOTE: the string form of py.test.raises evaluates the code lazily in
    # the caller's namespace (legacy py.test API).
    def test_svnurl_needs_arg(self, path1):
        py.test.raises(TypeError, "py.path.svnurl()")

    def test_svnurl_does_not_accept_None_either(self, path1):
        py.test.raises(Exception, "py.path.svnurl(None)")

    def test_svnurl_characters_simple(self, path1):
        py.path.svnurl("svn+ssh://hello/world")

    def test_svnurl_characters_at_user(self, path1):
        py.path.svnurl("http://user@host.com/some/dir")

    def test_svnurl_characters_at_path(self, path1):
        # '@' is only valid in the user-info part, not in the path.
        py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo@bar")')

    def test_svnurl_characters_colon_port(self, path1):
        py.path.svnurl("http://host.com:8080/some/dir")

    def test_svnurl_characters_tilde_end(self, path1):
        py.path.svnurl("http://host.com/some/file~")

    @py.test.mark.xfail("sys.platform == 'win32'")
    def test_svnurl_characters_colon_path(self, path1):
        # colons are allowed on win32, because they're part of the drive
        # part of an absolute path... however, they shouldn't be allowed in
        # other parts, I think
        py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo:bar")')

    def test_export(self, path1, tmpdir):
        # Export must reproduce the repository listing without .svn metadata,
        # and exporting an older revision must not contain newer entries.
        tmpdir = tmpdir.join("empty")
        p = path1.export(tmpdir)
        assert p == tmpdir  # XXX should return None
        n1 = [x.basename for x in tmpdir.listdir()]
        n2 = [x.basename for x in path1.listdir()]
        n1.sort()
        n2.sort()
        assert n1 == n2
        assert not p.join('.svn').check()
        rev = path1.mkdir("newdir")
        tmpdir.remove()
        assert not tmpdir.check()
        path1.new(rev=1).export(tmpdir)
        for p in tmpdir.listdir():
            assert p.basename in n2
class TestSvnInfoCommand:
    """Parsing of ``svn ls -v`` output lines by InfoSvnCommand.

    Each test feeds one literal listing line (format varies by svn client
    version) and checks the parsed attributes.
    """

    def test_svn_1_2(self):
        line = " 2256 hpk 165 Nov 24 17:55 __init__.py"
        info = InfoSvnCommand(line)
        now = datetime.datetime.now()
        assert info.last_author == 'hpk'
        assert info.created_rev == 2256
        assert info.kind == 'file'
        # we don't check for the year (2006), because that depends
        # on the clock correctly being setup
        assert time.gmtime(info.mtime)[1:6] == (11, 24, 17, 55, 0)
        assert info.size == 165
        # time is mtime expressed in microseconds
        assert info.time == info.mtime * 1000000

    def test_svn_1_3(self):
        # svn 1.3 format: no time-of-day, a year instead.
        line = " 4784 hpk 2 Jun 01 2004 __init__.py"
        info = InfoSvnCommand(line)
        assert info.last_author == 'hpk'
        assert info.kind == 'file'

    def test_svn_1_3_b(self):
        # Trailing '/' marks a directory entry.
        line = " 74 autoadmi Oct 06 23:59 plonesolutions.com/"
        info = InfoSvnCommand(line)
        assert info.last_author == 'autoadmi'
        assert info.kind == 'dir'
def test_badchars():
    """URLs with characters that are invalid in svn paths must be rejected."""
    bad_construction = "py.path.svnurl('http://host/tmp/@@@:')"
    py.test.raises(ValueError, bad_construction)
| mpl-2.0 |
Schibum/sndlatr | gae/w69b/cache.py | 3 | 5183 | import collections
import functools
from itertools import ifilterfalse
from heapq import nsmallest
from operator import itemgetter
class Counter(dict):
    """dict subclass whose absent keys read as zero."""

    def __missing__(self, key):
        # Looking up a missing key yields 0 without inserting it.
        return 0
def lru_cache(maxsize=100):
    '''Least-recently-used cache decorator.

    Arguments to the cached function must be hashable.
    Cache performance statistics stored in f.hits and f.misses.
    Clear the cache with f.clear().
    http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used

    '''
    # The recency queue may hold duplicates; it is compacted once it grows
    # to 10x the cache size (see below).
    maxqueue = maxsize * 10
    # Builtins are bound as default arguments for faster lookup in the
    # wrapper's hot path.
    def decorating_function(user_function,
            len=len, iter=iter, tuple=tuple, sorted=sorted, KeyError=KeyError):
        cache = {}                  # mapping of args to results
        queue = collections.deque() # order that keys have been used
        refcount = Counter()        # times each key is in the queue
        sentinel = object()         # marker for looping around the queue
        kwd_mark = object()         # separate positional and keyword args

        # lookup optimizations (ugly but fast)
        queue_append, queue_popleft = queue.append, queue.popleft
        queue_appendleft, queue_pop = queue.appendleft, queue.pop

        @functools.wraps(user_function)
        def wrapper(*args, **kwds):
            # cache key records both positional and keyword args
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))

            # record recent use of this key
            queue_append(key)
            refcount[key] += 1

            # get cache entry or compute if not found
            try:
                result = cache[key]
                wrapper.hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                wrapper.misses += 1

                # purge least recently used cache entry
                # (skip queue entries that are stale duplicates, i.e. whose
                # refcount stays positive after decrementing)
                if len(cache) > maxsize:
                    key = queue_popleft()
                    refcount[key] -= 1
                    while refcount[key]:
                        key = queue_popleft()
                        refcount[key] -= 1
                    del cache[key], refcount[key]

            # periodically compact the queue by eliminating duplicate keys
            # while preserving order of most recent access
            if len(queue) > maxqueue:
                refcount.clear()
                queue_appendleft(sentinel)
                for key in ifilterfalse(refcount.__contains__,
                                        iter(queue_pop, sentinel)):
                    queue_appendleft(key)
                    refcount[key] = 1

            return result

        def clear():
            # Reset the cache and the statistics counters.
            cache.clear()
            queue.clear()
            refcount.clear()
            wrapper.hits = wrapper.misses = 0

        wrapper.hits = wrapper.misses = 0
        wrapper.clear = clear
        return wrapper
    return decorating_function
def lfu_cache(maxsize=100):
    '''Least-frequently-used cache decorator.

    Arguments to the cached function must be hashable.
    Cache performance statistics stored in f.hits and f.misses.
    Clear the cache with f.clear().
    http://en.wikipedia.org/wiki/Least_Frequently_Used

    '''
    def decorating_function(user_function):
        cache = {}                  # mapping of args to results
        use_count = Counter()       # times each key has been accessed
        kwd_mark = object()         # separate positional and keyword args

        @functools.wraps(user_function)
        def wrapper(*args, **kwds):
            # cache key records both positional and keyword args
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))
            use_count[key] += 1

            # get cache entry or compute if not found
            try:
                result = cache[key]
                wrapper.hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                wrapper.misses += 1

                # purge least frequently used cache entry
                # BUG FIX: the old eviction count `maxsize // 10` was 0 for
                # maxsize < 10, so nothing was ever purged and the cache grew
                # without bound. Always evict at least one entry.
                if len(cache) > maxsize:
                    for key, _ in nsmallest(max(maxsize // 10, 1),
                                            use_count.iteritems(),
                                            key=itemgetter(1)):
                        del cache[key], use_count[key]
            return result

        def clear():
            # Reset the cache and the statistics counters.
            cache.clear()
            use_count.clear()
            wrapper.hits = wrapper.misses = 0

        wrapper.hits = wrapper.misses = 0
        wrapper.clear = clear
        return wrapper
    return decorating_function
if __name__ == '__main__':
    # Smoke-test: hammer each cache decorator with random lookups over a
    # small domain and print the resulting hit/miss statistics.

    @lru_cache(maxsize=20)
    def f(x, y):
        return 3*x+y

    domain = range(5)
    from random import choice
    for i in range(1000):
        r = f(choice(domain), choice(domain))

    print(f.hits, f.misses)

    @lfu_cache(maxsize=20)
    def f(x, y):
        return 3*x+y

    domain = range(5)
    from random import choice
    for i in range(1000):
        r = f(choice(domain), choice(domain))

    print(f.hits, f.misses)
| apache-2.0 |
evernym/zeno | plenum/test/bls/test_bls_bft_replica.py | 2 | 32557 | from copy import copy
import base58
import pytest
from crypto.bls.bls_bft_replica import BlsBftReplica
from crypto.bls.bls_multi_signature import MultiSignature, MultiSignatureValue
from plenum.bls.bls_bft_factory import create_default_bls_bft_factory
from plenum.common.constants import DOMAIN_LEDGER_ID, POOL_LEDGER_ID, CONFIG_LEDGER_ID, AUDIT_LEDGER_ID, TXN_PAYLOAD, \
TXN_PAYLOAD_DATA, AUDIT_TXN_STATE_ROOT, AUDIT_TXN_LEDGER_ROOT, AUDIT_TXN_PP_SEQ_NO
from plenum.common.messages.node_messages import PrePrepare
from plenum.common.util import get_utc_epoch
from plenum.server.quorums import Quorums
from plenum.test.bls.helper import process_commits_for_key, calculate_multi_sig, process_ordered
from plenum.test.helper import create_pre_prepare_params, create_pre_prepare_no_bls, create_commit_params, \
create_commit_no_bls_sig, create_commit_with_bls_sig, create_commit_bls_sig, create_prepare_params, create_prepare, \
generate_state_root, create_commit_with_bls_sigs
whitelist = ['Indy Crypto error']
@pytest.fixture()
def _bls_bft_replicas(txnPoolNodeSet):
    """One master BLS BFT replica per node in the pool."""
    return [
        create_default_bls_bft_factory(node).create_bls_bft_replica(is_master=True)
        for node in txnPoolNodeSet
    ]
@pytest.fixture()
def quorums(txnPoolNodeSet):
    # Quorum thresholds derived from the current pool size.
    return Quorums(len(txnPoolNodeSet))
@pytest.fixture(params=[POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID])
def ledger_id(request):
    # Parametrizes tests over the pool, domain and config ledgers.
    return request.param
@pytest.fixture()
def state_root():
    # Fresh random state root hash for each test.
    return generate_state_root()
@pytest.fixture()
def txn_root():
    # Fresh random txn root hash for each test.
    return generate_state_root()
@pytest.fixture()
def pool_state_root(_bls_bft_replicas):
    # Committed pool-ledger state root, serialized exactly as replicas do.
    bls_bft_replica = _bls_bft_replicas[0]
    return bls_bft_replica.state_root_serializer.serialize(
        bytes(bls_bft_replica._bls_bft.bls_key_register.get_pool_root_hash_committed()))
@pytest.fixture()
def pool_txn_root():
    # Fresh random pool-ledger txn root for each test.
    return generate_state_root()
@pytest.yield_fixture()
def patch_audit_ledger(txnPoolNodeSet, pool_state_root, state_root, txn_root, pool_txn_root, ledger_id):
    """Patch every node's audit ledger for the test, restoring it afterwards."""
    substitutions = [
        _patch_audit_ledger(node, pool_state_root, state_root, txn_root, pool_txn_root, ledger_id)
        for node in txnPoolNodeSet
    ]
    yield txnPoolNodeSet
    # Undo the monkey-patching applied by _patch_audit_ledger.
    for audit_ledger, orig_getter, orig_uncommitted in substitutions:
        audit_ledger.get_by_seq_no_uncommitted = orig_getter
        audit_ledger.uncommittedTxns = orig_uncommitted
def _patch_audit_ledger(node, pool_state_root, state_root, txn_root, pool_txn_root, ledger_id):
    """Monkey-patch *node*'s audit ledger so tests see the given roots.

    Returns (audit_ledger, original get_by_seq_no_uncommitted,
    original uncommittedTxns) so the caller can restore the ledger.
    """
    audit_ledger = node.db_manager.get_ledger(AUDIT_LEDGER_ID)
    old_last = audit_ledger.get_by_seq_no_uncommitted
    old_txn = audit_ledger.uncommittedTxns
    # Pretend there is exactly one uncommitted audit txn.
    audit_ledger.uncommittedTxns = [1]
    # Any uncommitted lookup returns a synthetic audit txn carrying the
    # desired roots. NOTE(review): key 3 with the hard-coded base58 hash
    # presumably stands for the config ledger -- confirm before reuse.
    audit_ledger.get_by_seq_no_uncommitted = lambda x: {
        TXN_PAYLOAD: {
            TXN_PAYLOAD_DATA: {
                AUDIT_TXN_STATE_ROOT: {
                    3: "2UQ3Da54cQ6SamunzXVAtBozFnkACELBH7HzbRPgfKzm",
                    POOL_LEDGER_ID: pool_state_root,
                    ledger_id: state_root
                },
                AUDIT_TXN_LEDGER_ROOT: {
                    3: "2UQ3Da54cQ6SamunzXVAtBozFnkACELBH7HzbRPgfKzm",
                    POOL_LEDGER_ID: pool_txn_root,
                    ledger_id: txn_root
                },
                AUDIT_TXN_PP_SEQ_NO: 0
            }
        }
    }
    return audit_ledger, old_last, old_txn
@pytest.fixture()
def bls_bft_replicas(_bls_bft_replicas, patch_audit_ledger):
    # Replicas whose nodes have a patched audit ledger for the test duration.
    return _bls_bft_replicas
@pytest.fixture()
def multi_sig_value(state_root, pool_state_root, txn_root, ledger_id):
return MultiSignatureValue(ledger_id=ledger_id,
state_root_hash=state_root,
pool_state_root_hash=pool_state_root,
txn_root_hash=txn_root,
timestamp=get_utc_epoch())
@pytest.fixture()
def multi_sig_values(state_root, txn_root, pool_state_root, pool_txn_root):
    """Per-ledger multi-sig values: DOMAIN signs over the domain roots, POOL
    over the pool roots.
    """
    # FIX: the POOL entry previously reused ledger_id=DOMAIN_LEDGER_ID
    # (copy-paste slip); it must identify the pool ledger it signs over.
    return {DOMAIN_LEDGER_ID: MultiSignatureValue(ledger_id=DOMAIN_LEDGER_ID,
                                                  state_root_hash=state_root,
                                                  pool_state_root_hash=pool_state_root,
                                                  txn_root_hash=txn_root,
                                                  timestamp=get_utc_epoch()),
            POOL_LEDGER_ID: MultiSignatureValue(ledger_id=POOL_LEDGER_ID,
                                                state_root_hash=pool_state_root,
                                                pool_state_root_hash=pool_state_root,
                                                txn_root_hash=pool_txn_root,
                                                timestamp=get_utc_epoch())}
@pytest.fixture()
def multi_signature(bls_bft_replicas, multi_sig_value):
    """A MultiSignature over *multi_sig_value* signed by every replica."""
    serialized_value = multi_sig_value.as_single_value()
    signatures = [replica._bls_bft.bls_crypto_signer.sign(serialized_value)
                  for replica in bls_bft_replicas]
    signers = [replica.node_id for replica in bls_bft_replicas]
    combined = bls_bft_replicas[0]._bls_bft.bls_crypto_verifier.create_multi_sig(signatures)
    return MultiSignature(signature=combined,
                          participants=signers,
                          value=multi_sig_value)
@pytest.fixture()
def multi_signature_multi(bls_bft_replicas, multi_sig_values):
    """One MultiSignature per ledger, each signed by every replica."""
    verifier = bls_bft_replicas[0]._bls_bft.bls_crypto_verifier
    result = []
    for value in multi_sig_values.values():
        serialized_value = value.as_single_value()
        signatures = [replica._bls_bft.bls_crypto_signer.sign(serialized_value)
                      for replica in bls_bft_replicas]
        signers = [replica.node_id for replica in bls_bft_replicas]
        result.append(MultiSignature(signature=verifier.create_multi_sig(signatures),
                                     participants=signers,
                                     value=value))
    return result
@pytest.fixture()
def pre_prepare_with_bls(multi_signature, ledger_id):
    """PrePrepare carrying a single (legacy-format) BLS multi-sig."""
    params = create_pre_prepare_params(state_root=multi_signature.value.state_root_hash,
                                       ledger_id=ledger_id,
                                       pool_state_root=multi_signature.value.pool_state_root_hash,
                                       bls_multi_sig=multi_signature)
    return PrePrepare(*params)
@pytest.fixture()
def pre_prepare_with_bls_multi(multi_signature, ledger_id, multi_signature_multi):
    """PrePrepare carrying multi-sigs for several ledgers (new format)."""
    params = create_pre_prepare_params(state_root=multi_signature.value.state_root_hash,
                                       ledger_id=ledger_id,
                                       pool_state_root=multi_signature.value.pool_state_root_hash,
                                       bls_multi_sigs=multi_signature_multi)
    return PrePrepare(*params)
@pytest.fixture()
def pre_prepare_with_incorrect_bls(multi_signature, ledger_id):
    """PrePrepare whose attached multi-sig is replaced with garbage bytes."""
    multi_signature.signature = base58.b58encode(b"somefakesignaturesomefakesignaturesomefakesignature").decode("utf-8")
    params = create_pre_prepare_params(state_root=multi_signature.value.state_root_hash,
                                       ledger_id=ledger_id,
                                       pool_state_root=multi_signature.value.pool_state_root_hash,
                                       bls_multi_sig=multi_signature)
    return PrePrepare(*params)
@pytest.fixture()
def pre_prepare_no_bls(state_root, pool_state_root, ledger_id):
    """PrePrepare without any BLS multi-sig attached."""
    params = create_pre_prepare_params(state_root=state_root,
                                       ledger_id=ledger_id,
                                       pool_state_root=pool_state_root)
    return PrePrepare(*params)
@pytest.yield_fixture(scope="function", params=['state_root', 'timestamp', 'txn_root'])
def pre_prepare_incorrect(state_root, request, ledger_id):
    """PrePrepare with exactly one field corrupted, chosen by fixture param.

    FIX: removed the dead ``'ledger_id'`` branch ('ledger_id' is not in
    ``params``), and fail loudly on an unknown param instead of hitting an
    UnboundLocalError on ``params`` below.
    """
    if request.param == 'state_root':
        params = create_pre_prepare_params(state_root=generate_state_root(), ledger_id=ledger_id)
    elif request.param == 'timestamp':
        params = create_pre_prepare_params(state_root=state_root, ledger_id=ledger_id, timestamp=get_utc_epoch() + 1000)
    elif request.param == 'txn_root':
        params = create_pre_prepare_params(state_root=state_root, ledger_id=ledger_id, txn_root=generate_state_root())
    else:
        raise ValueError("Unexpected fixture param: {}".format(request.param))
    return PrePrepare(*params)
# ------ CHECK ACCEPTABLE LEDGER IDs ------
def test_process_ledger(bls_bft_replicas, ledger_id):
    """Every replica accepts all parametrized ledger ids."""
    for r in bls_bft_replicas:
        assert r._can_process_ledger(ledger_id)
# ------ CREATE 3PC MESSAGES ------
def test_update_pre_prepare_first_time(bls_bft_replicas, state_root, ledger_id):
    """Without previously ordered multi-sigs, PrePrepare params are unchanged."""
    params = create_pre_prepare_params(state_root, ledger_id=ledger_id)
    params_initial = copy(params)
    for bls_bft_replica in bls_bft_replicas:
        params = bls_bft_replica.update_pre_prepare(params, ledger_id)
    assert params == params_initial
def test_update_pre_prepare_after_ordered(bls_bft_replicas, state_root, fake_multi_sig, ledger_id):
    """With a cached multi-sig, replicas append two fields to PrePrepare."""
    for bls_bft_replica in bls_bft_replicas:
        bls_bft_replica._all_bls_latest_multi_sigs = [fake_multi_sig]
    params = create_pre_prepare_params(state_root, ledger_id=ledger_id)
    params_initial = copy(params)
    for bls_bft_replica in bls_bft_replicas:
        params = bls_bft_replica.update_pre_prepare(copy(params_initial), ledger_id)
        assert params != params_initial
        # we fill BLS_MULTI_SIG by None for backward compatibility
        assert len(params) - len(params_initial) == 2
        assert params[-2] is None
def test_update_prepare(bls_bft_replicas, state_root, ledger_id):
    """Prepare messages are passed through unchanged."""
    params = create_prepare_params(0, 0, state_root)
    params_initial = copy(params)
    for bls_bft_replica in bls_bft_replicas:
        params = bls_bft_replica.update_prepare(params, ledger_id)
    assert params == params_initial
def test_update_commit(bls_bft_replicas, pre_prepare_with_bls):
    """Commits get a BLS signature plus a placeholder field appended."""
    params = create_commit_params(0, 0)
    params_initial = copy(params)
    for bls_bft_replica in bls_bft_replicas:
        params = bls_bft_replica.update_commit(copy(params_initial), pre_prepare_with_bls)
        assert params != params_initial
        # we fill BLS_MULTI_SIG by ' ' for backward compatibility
        assert len(params) - len(params_initial) == 2
        assert params[-2] == ' '
def test_update_commit_without_bls_crypto_signer(bls_bft_replicas, pre_prepare_with_bls):
    """Without a crypto signer the Commit is left untouched."""
    params = create_commit_params(0, 0)
    params_initial = copy(params)
    for bls_bft_replica in bls_bft_replicas:
        # Temporarily disable the signer; restore it after the call.
        bls_crypto_signer = bls_bft_replica._bls_bft.bls_crypto_signer
        bls_bft_replica._bls_bft.bls_crypto_signer = None
        params = bls_bft_replica.update_commit(params,
                                               pre_prepare_with_bls)
        bls_bft_replica._bls_bft.bls_crypto_signer = bls_crypto_signer
    assert params == params_initial
# ------ VALIDATE 3PC MESSAGES ------
def test_validate_pre_prepare_no_sigs(bls_bft_replicas, pre_prepare_no_bls):
    """A PrePrepare without multi-sigs validates on every replica."""
    for sender_bls_bft_replica in bls_bft_replicas:
        for verifier_bls_bft_replica in bls_bft_replicas:
            assert not verifier_bls_bft_replica.validate_pre_prepare(pre_prepare_no_bls,
                                                                     sender_bls_bft_replica.node_id)
def test_validate_pre_prepare_correct_multi_sig(bls_bft_replicas, pre_prepare_with_bls):
    """A PrePrepare with a correct multi-sig validates on every replica."""
    for sender_bls_bft_replica in bls_bft_replicas:
        for verifier_bls_bft_replica in bls_bft_replicas:
            assert not verifier_bls_bft_replica.validate_pre_prepare(pre_prepare_with_bls,
                                                                     sender_bls_bft_replica.node_id)
def test_validate_pre_prepare_does_not_use_committed_pool_state(bls_bft_replicas,
                                                                pre_prepare_with_bls,
                                                                monkeypatch):
    """Validation must not consult the committed pool root (would fail here)."""
    for sender_bls_bft_replica in bls_bft_replicas:
        for verifier_bls_bft_replica in bls_bft_replicas:
            monkeypatch.setattr(verifier_bls_bft_replica._bls_bft.bls_key_register,
                                'get_pool_root_hash_committed',
                                lambda: None)
            assert not verifier_bls_bft_replica.validate_pre_prepare(pre_prepare_with_bls,
                                                                     sender_bls_bft_replica.node_id)
            monkeypatch.undo()
def test_validate_pre_prepare_incorrect_multi_sig(bls_bft_replicas,
                                                  pre_prepare_with_incorrect_bls):
    """A forged multi-sig is rejected with PPR_BLS_MULTISIG_WRONG."""
    for sender_bls_bft in bls_bft_replicas:
        for verifier_bls_bft in bls_bft_replicas:
            status = verifier_bls_bft.validate_pre_prepare(pre_prepare_with_incorrect_bls,
                                                           sender_bls_bft.node_id)
            assert status == BlsBftReplica.PPR_BLS_MULTISIG_WRONG
def test_validate_prepare(bls_bft_replicas, state_root):
    """Prepare validation always succeeds."""
    prepare = create_prepare((0, 0), state_root)
    for sender_bls_bft in bls_bft_replicas:
        for verifier_bls_bft in bls_bft_replicas:
            assert not verifier_bls_bft.validate_prepare(prepare, sender_bls_bft.node_id)
def test_validate_commit_no_sigs(bls_bft_replicas, state_root):
    """A Commit without BLS sigs must pass validation on every replica.

    FIX: ``state_root`` was not a parameter, so the bare name resolved to the
    module-level *fixture function* rather than a fixture value; request the
    fixture so an actual state root is passed.
    """
    key = (0, 0)
    commit = create_commit_no_bls_sig(key)
    for sender_bls_bft in bls_bft_replicas:
        for verifier_bls_bft in bls_bft_replicas:
            assert not verifier_bls_bft.validate_commit(commit,
                                                        sender_bls_bft.node_id,
                                                        state_root)
def test_validate_commit_correct_sig_first_time(bls_bft_replicas, pre_prepare_no_bls):
    """A correctly signed Commit validates when no multi-sig exists yet."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_no_bls)
        for verifier_bls_bft in bls_bft_replicas:
            assert not verifier_bls_bft.validate_commit(commit,
                                                        sender_bls_bft.node_id,
                                                        pre_prepare_no_bls)
def test_validate_commit_correct_sig_second_time(bls_bft_replicas, pre_prepare_with_bls):
    """A correctly signed Commit also validates against a PrePrepare that
    already carries a multi-sig."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_with_bls)
        for verifier_bls_bft in bls_bft_replicas:
            assert verifier_bls_bft.validate_commit(commit,
                                                    sender_bls_bft.node_id,
                                                    pre_prepare_with_bls) is None
def test_validate_commit_does_not_use_committed_pool_state(bls_bft_replicas, pre_prepare_with_bls, monkeypatch):
    """Commit validation must not consult the committed pool root."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_with_bls)
        for verifier_bls_bft in bls_bft_replicas:
            # If the committed pool root were consulted, validation would fail.
            monkeypatch.setattr(verifier_bls_bft._bls_bft.bls_key_register,
                                'get_pool_root_hash_committed',
                                lambda: None)
            assert verifier_bls_bft.validate_commit(commit,
                                                    sender_bls_bft.node_id,
                                                    pre_prepare_with_bls) is None
            monkeypatch.undo()
def test_validate_commit_incorrect_sig(bls_bft_replicas, pre_prepare_with_bls):
    """A forged Commit signature is rejected with CM_BLS_SIG_WRONG."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        fake_sig = base58.b58encode(b"somefakesignaturesomefakesignaturesomefakesignature").decode("utf-8")
        commit = create_commit_with_bls_sig(key, fake_sig)
        for verifier_bls_bft in bls_bft_replicas:
            status = verifier_bls_bft.validate_commit(commit,
                                                      sender_bls_bft.node_id,
                                                      pre_prepare_with_bls)
            assert status == BlsBftReplica.CM_BLS_SIG_WRONG
def test_validate_commit_incorrect_value(bls_bft_replicas, pre_prepare_incorrect, pre_prepare_no_bls):
    """A Commit signed over a corrupted PrePrepare does not match the real
    one and is rejected."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_incorrect)
        for verifier_bls_bft in bls_bft_replicas:
            status = verifier_bls_bft.validate_commit(commit,
                                                      sender_bls_bft.node_id,
                                                      pre_prepare_no_bls)
            assert status == BlsBftReplica.CM_BLS_SIG_WRONG
# ------ PROCESS 3PC MESSAGES ------
def test_process_pre_prepare_no_multisig(bls_bft_replicas, pre_prepare_no_bls):
    """Processing a PrePrepare without multi-sigs must not raise."""
    for sender_bls_bft in bls_bft_replicas:
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_pre_prepare(pre_prepare_no_bls, sender_bls_bft.node_id)
def test_process_pre_prepare_multisig(bls_bft_replicas, pre_prepare_with_bls):
    """Processing a PrePrepare carrying a multi-sig must not raise."""
    for sender_bls_bft in bls_bft_replicas:
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_pre_prepare(pre_prepare_with_bls, sender_bls_bft.node_id)
def test_process_prepare(bls_bft_replicas, state_root):
    """Processing a Prepare must not raise."""
    for sender_bls_bft in bls_bft_replicas:
        prepare = create_prepare((0, 0), state_root)
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_prepare(prepare, sender_bls_bft.node_id)
def test_process_commit_no_sigs(bls_bft_replicas):
    """Processing an unsigned Commit must not raise."""
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_no_bls_sig((0, 0))
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_commit(commit,
                                            sender_bls_bft.node_id)
def test_process_commit_with_sigs(bls_bft_replicas, pre_prepare_no_bls):
    """Processing a signed Commit must not raise."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_no_bls)
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_commit(commit,
                                            sender_bls_bft.node_id)
def test_process_order(bls_bft_replicas, pre_prepare_no_bls, quorums):
    """Ordering after a quorum of Commits must not raise."""
    key = (0, 0)
    process_commits_for_key(key, pre_prepare_no_bls, bls_bft_replicas)
    for bls_bft in bls_bft_replicas:
        bls_bft.process_order(key,
                              quorums,
                              pre_prepare_no_bls)
# ------ MULTIPLE MULTI_SIGS ------
def test_update_pre_prepare_after_ordered_with_multiple_sigs(bls_bft_replicas, state_root, fake_multi_sig,
                                                             multi_signature, ledger_id):
    """With several cached multi-sigs, PrePrepare params are extended by two."""
    for bls_bft_replica in bls_bft_replicas:
        bls_bft_replica._all_bls_latest_multi_sigs = [fake_multi_sig, multi_signature]
    params = create_pre_prepare_params(state_root, ledger_id=ledger_id)
    params_initial = copy(params)
    for bls_bft_replica in bls_bft_replicas:
        params = bls_bft_replica.update_pre_prepare(copy(params_initial), ledger_id)
        assert params != params_initial
        # we fill BLS_MULTI_SIG by None for backward compatibility
        assert len(params) - len(params_initial) == 2
        assert params[-2] is None
def test_validate_pre_prepare_multiple_correct_multi_sigs(bls_bft_replicas, pre_prepare_with_bls_multi):
    """A PrePrepare with multi-sigs for several ledgers validates everywhere."""
    for sender_bls_bft_replica in bls_bft_replicas:
        for verifier_bls_bft_replica in bls_bft_replicas:
            assert not verifier_bls_bft_replica.validate_pre_prepare(pre_prepare_with_bls_multi,
                                                                     sender_bls_bft_replica.node_id)
def test_validate_commit_incorrect_sig_with_multiple_sigs(bls_bft_replicas, pre_prepare_with_bls_multi):
    """Forged signatures in the multi-sig Commit format are rejected."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        fake_sig = base58.b58encode(b"somefakesignaturesomefakesignaturesomefakesignature").decode("utf-8")
        commit = create_commit_with_bls_sigs(key, fake_sig, pre_prepare_with_bls_multi.ledgerId)
        for verifier_bls_bft in bls_bft_replicas:
            status = verifier_bls_bft.validate_commit(commit,
                                                      sender_bls_bft.node_id,
                                                      pre_prepare_with_bls_multi)
            assert status == BlsBftReplica.CM_BLS_SIG_WRONG
def test_validate_commit_with_multiple_sigs_one_sig_incorrect(bls_bft_replicas, pre_prepare_with_bls):
    """One bad signature among several is enough to reject the Commit."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        fake_sig = base58.b58encode(b"somefakesignaturesomefakesignaturesomefakesignature").decode("utf-8")
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_with_bls)
        # Corrupt the signature for ledger 3 only.
        commit.blsSigs[str(3)] = fake_sig
        for verifier_bls_bft in bls_bft_replicas:
            status = verifier_bls_bft.validate_commit(commit,
                                                      sender_bls_bft.node_id,
                                                      pre_prepare_with_bls)
            assert status == BlsBftReplica.CM_BLS_SIG_WRONG
def test_validate_commit_correct_sig_with_multiple_sigs(bls_bft_replicas, pre_prepare_no_bls):
    """Correct signatures in the multi-sig format validate on every replica."""
    key = (0, 0)
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, key, pre_prepare_no_bls)
        for verifier_bls_bft in bls_bft_replicas:
            assert not verifier_bls_bft.validate_commit(commit,
                                                        sender_bls_bft.node_id,
                                                        pre_prepare_no_bls)
def test_process_pre_prepare_with_multiple_sigs(bls_bft_replicas, pre_prepare_with_bls_multi):
    """Processing a multi-sig PrePrepare must not raise."""
    for sender_bls_bft in bls_bft_replicas:
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_pre_prepare(pre_prepare_with_bls_multi, sender_bls_bft.node_id)
def test_process_commit_with_multiple_sigs(bls_bft_replicas, pre_prepare_with_bls_multi):
    """Commits built from a multi-sig PrePrepare carry blsSigs and process."""
    for sender_bls_bft in bls_bft_replicas:
        commit = create_commit_bls_sig(sender_bls_bft, (0, 0), pre_prepare_with_bls_multi)
        assert commit.blsSigs is not None
        for verifier_bls_bft in bls_bft_replicas:
            verifier_bls_bft.process_commit(commit,
                                            sender_bls_bft.node_id)
def test_process_order_with_multiple_sigs(bls_bft_replicas, pre_prepare_with_bls, quorums):
    """Ordering with a multi-sig PrePrepare must not raise."""
    key = (0, 0)
    process_commits_for_key(key, pre_prepare_with_bls, bls_bft_replicas)
    for bls_bft in bls_bft_replicas:
        bls_bft.process_order(key,
                              quorums,
                              pre_prepare_with_bls)
# ------ CREATE MULTI_SIG ------
def test_create_multi_sig_from_all(bls_bft_replicas, quorums, pre_prepare_no_bls):
    """Commits from all replicas yield a MultiSignature."""
    multi_sig = calculate_multi_sig(
        creator=bls_bft_replicas[0],
        bls_bft_with_commits=bls_bft_replicas,
        quorums=quorums,
        pre_prepare=pre_prepare_no_bls
    )
    assert multi_sig
    assert isinstance(multi_sig, MultiSignature)
def test_create_multi_sig_quorum(bls_bft_replicas, quorums, pre_prepare_no_bls):
    """n-f Commits are enough to build a multi-sig."""
    # success on n-f=3
    multi_sig = calculate_multi_sig(
        creator=bls_bft_replicas[0],
        bls_bft_with_commits=bls_bft_replicas[1:],
        quorums=quorums,
        pre_prepare=pre_prepare_no_bls
    )
    assert multi_sig
    assert isinstance(multi_sig, MultiSignature)
def test_create_multi_sig_no_quorum(bls_bft_replicas, quorums, pre_prepare_no_bls):
    """Fewer than n-f Commits must not produce a multi-sig."""
    # not success on 2
    multi_sig = calculate_multi_sig(
        creator=bls_bft_replicas[0],
        bls_bft_with_commits=bls_bft_replicas[2:],
        quorums=quorums,
        pre_prepare=pre_prepare_no_bls,
    )
    assert not multi_sig
def test_create_multi_sig_no_quorum_empty(bls_bft_replicas, quorums, pre_prepare_no_bls):
    """No Commits at all must not produce a multi-sig."""
    multi_sig = calculate_multi_sig(
        creator=bls_bft_replicas[0],
        bls_bft_with_commits=[],
        quorums=quorums,
        pre_prepare=pre_prepare_no_bls
    )
    assert not multi_sig
def test_create_multi_sig_are_equal(bls_bft_replicas, quorums, pre_prepare_no_bls):
    """Every replica derives the same multi-sig from the same Commits."""
    multi_sigs = []
    for creator in bls_bft_replicas:
        multi_sig = calculate_multi_sig(
            creator=creator,
            bls_bft_with_commits=bls_bft_replicas,
            quorums=quorums,
            pre_prepare=pre_prepare_no_bls
        )
        multi_sigs.append(multi_sig)
    assert all(x == multi_sigs[0] for x in multi_sigs)
# ------ MULTI_SIG SAVED ------
def test_signatures_cached_for_commits(bls_bft_replicas, ledger_id):
    """Signatures from Commits are cached per 3PC key and per ledger, and
    re-processing the same key replaces (not duplicates) the cache entry.

    Refactored: the six identical assertion stanzas are now one local helper.
    """
    # Every PrePrepare carries multi-sigs for these ledgers
    # (see _patch_audit_ledger).
    expected_ledgers = {ledger_id, 3, POOL_LEDGER_ID}

    def check_cached(keys):
        # Each known 3PC key holds one sig set per ledger, one entry
        # per replica, and nothing else is cached.
        for bls_bft in bls_bft_replicas:
            assert len(bls_bft._all_signatures) == len(keys)
            for key in keys:
                assert len(bls_bft._all_signatures[key]) == len(expected_ledgers)
                assert str(ledger_id) in bls_bft._all_signatures[key]
                assert len(bls_bft._all_signatures[key][str(ledger_id)]) == len(bls_bft_replicas)

    key1 = (0, 0)
    process_commits_for_key(key1, create_pre_prepare_no_bls(generate_state_root()), bls_bft_replicas)
    check_cached([key1])
    # Same key again: the cache entry is replaced, not duplicated.
    process_commits_for_key(key1, create_pre_prepare_no_bls(generate_state_root()), bls_bft_replicas)
    check_cached([key1])

    key2 = (0, 1)
    process_commits_for_key(key2, create_pre_prepare_no_bls(generate_state_root()), bls_bft_replicas)
    check_cached([key1, key2])
    process_commits_for_key(key2, create_pre_prepare_no_bls(generate_state_root()), bls_bft_replicas)
    check_cached([key1, key2])

    key3 = (1, 0)
    process_commits_for_key(key3, create_pre_prepare_no_bls(generate_state_root()), bls_bft_replicas)
    check_cached([key1, key2, key3])
    process_commits_for_key(key3, create_pre_prepare_no_bls(generate_state_root()), bls_bft_replicas)
    check_cached([key1, key2, key3])
def test_multi_sig_saved_locally_for_ordered(bls_bft_replicas, pre_prepare_no_bls,
                                             state_root, quorums):
    """After ordering, each replica persists a multi-sig in its BLS store."""
    key = (0, 0)
    process_commits_for_key(key, pre_prepare_no_bls, bls_bft_replicas)
    process_ordered(key, bls_bft_replicas, pre_prepare_no_bls, quorums)
    for bls_bft_replica in bls_bft_replicas:
        assert bls_bft_replica._bls_bft.bls_store.get(state_root)
def test_multi_sig_saved_shared_with_pre_prepare(bls_bft_replicas, quorums, pre_prepare_with_bls):
    """Multi-sigs received via PrePrepare are saved and identical everywhere."""
    multi_sigs = []
    for bls_bft_replica in bls_bft_replicas:
        bls_bft_replica.process_pre_prepare(pre_prepare_with_bls, bls_bft_replicas[0].node_id)
        multi_sig = bls_bft_replica._bls_bft.bls_store.get(pre_prepare_with_bls.stateRootHash)
        assert multi_sig
        multi_sigs.append(multi_sig)
    # all saved multi-sigs are equal
    assert all(x == multi_sigs[0] for x in multi_sigs)
def test_preprepare_multisig_replaces_saved(bls_bft_replicas, quorums,
                                            pre_prepare_no_bls, pre_prepare_with_bls):
    """A multi-sig arriving in a PrePrepare overrides the locally built one."""
    # have locally calculated multi-sigs
    key = (0, 0)
    state_root = pre_prepare_no_bls.stateRootHash
    for sender_bls_bft_replica in bls_bft_replicas:
        commit = create_commit_bls_sig(
            sender_bls_bft_replica,
            key,
            pre_prepare_no_bls)
        for verifier_bls_bft_replica in bls_bft_replicas:
            # use 3 of 4 commits only
            if verifier_bls_bft_replica != sender_bls_bft_replica:
                verifier_bls_bft_replica.process_commit(commit,
                                                        sender_bls_bft_replica.node_id)
    process_ordered(key, bls_bft_replicas, pre_prepare_no_bls, quorums)
    # get locally calculated multi-sigs
    local_multi_sigs = {}
    for bls_bft_replica in bls_bft_replicas:
        local_multi_sigs[bls_bft_replica.node_id] = bls_bft_replica._bls_bft.bls_store.get(state_root)
    # have multi-sig for PrePrepare (make it different from the local one by using all 4 nodes)
    # get multi-sigs with PrePrepare and make sure they differ from local ones;
    # the local ones must be overridden
    multi_sigs = []
    for bls_bft_replica in bls_bft_replicas:
        bls_bft_replica.process_pre_prepare(pre_prepare_with_bls, bls_bft_replicas[0].node_id)
        multi_sig = bls_bft_replica._bls_bft.bls_store.get(state_root)
        local_multi_sig = local_multi_sigs[bls_bft_replica.node_id]
        assert multi_sig
        assert local_multi_sig
        assert multi_sig != local_multi_sig
        multi_sigs.append(multi_sig)
    # all saved multi-sigs are equal
    assert all(x == multi_sigs[0] for x in multi_sigs)
# ------ GC ------
def test_commits_gc(bls_bft_replicas, ledger_id):
    """gc() drops cached signatures for 3PC keys up to the given key."""
    key1 = (0, 0)
    pre_prepare1 = create_pre_prepare_no_bls(generate_state_root())
    process_commits_for_key(key1, pre_prepare1, bls_bft_replicas)
    key2 = (0, 1)
    pre_prepare2 = create_pre_prepare_no_bls(generate_state_root())
    process_commits_for_key(key2, pre_prepare2, bls_bft_replicas)
    key3 = (1, 2)
    pre_prepare3 = create_pre_prepare_no_bls(generate_state_root())
    process_commits_for_key(key3, pre_prepare3, bls_bft_replicas)
    for bls_bft in bls_bft_replicas:
        assert len(bls_bft._all_signatures) == 3
        assert key1 in bls_bft._all_signatures
        assert key2 in bls_bft._all_signatures
        assert key3 in bls_bft._all_signatures
    # Collect everything up to and including (0, 1).
    for bls_bft in bls_bft_replicas:
        bls_bft.gc((0, 1))
    for bls_bft in bls_bft_replicas:
        assert len(bls_bft._all_signatures) == 1
        assert not key1 in bls_bft._all_signatures
        assert not key2 in bls_bft._all_signatures
        # we have multi-sigs for all ledgers in PrePrepare, see _patch_audit_ledger
        assert len(bls_bft._all_signatures[key3]) == len({ledger_id, 3, POOL_LEDGER_ID})
        assert str(ledger_id) in bls_bft._all_signatures[key3]
        assert len(bls_bft._all_signatures[key3][str(ledger_id)]) == len(bls_bft_replicas)
| apache-2.0 |
melodous/designate | designate/backend/impl_powerdns/__init__.py | 1 | 17862 | # Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
# Copyright 2012 Managed I.T.
#
# Author: Patrick Galbraith <patg@hp.com>
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import threading
from oslo.config import cfg
from oslo.db import options
from sqlalchemy.sql import select
from designate.openstack.common import excutils
from designate.openstack.common import log as logging
from designate.i18n import _LC
from designate import exceptions
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
from designate.sqlalchemy.expressions import InsertFromSelect
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Only hmac-md5 TSIG keys may be stored in this backend.
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']
CONF.register_group(cfg.OptGroup(
    name='backend:powerdns', title="Configuration for Powerdns Backend"
))
CONF.register_opts([
    cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
    cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
                                                'send NOTIFYs to'),
] + options.database_opts, group='backend:powerdns')
# Override the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
CONF.set_default('connection', 'sqlite:///$state_path/powerdns.sqlite',
                 group='backend:powerdns')
def _map_col(keys, col):
return dict([(keys[i], col[i]) for i in range(len(keys))])
class PowerDNSBackend(base.Backend):
__plugin_name__ = 'powerdns'
    def __init__(self, *args, **kwargs):
        super(PowerDNSBackend, self).__init__(*args, **kwargs)
        # Thread-local storage; each greenthread lazily gets its own DB
        # session via the `session` property.
        self.local_store = threading.local()
    def start(self):
        super(PowerDNSBackend, self).start()
@property
def session(self):
# NOTE: This uses a thread local store, allowing each greenthread to
# have it's own session stored correctly. Without this, each
# greenthread may end up using a single global session, which
# leads to bad things happening.
global LOCAL_STORE
if not hasattr(self.local_store, 'session'):
self.local_store.session = session.get_session(self.name)
return self.local_store.session
    def _create(self, table, values):
        """INSERT *values* into *table* and return the created row as a dict."""
        query = table.insert()
        resultproxy = self.session.execute(query, values)
        # Refetch the row, for generated columns etc
        query = select([table])\
            .where(table.c.id == resultproxy.inserted_primary_key[0])
        resultproxy = self.session.execute(query)
        return _map_col(query.columns.keys(), resultproxy.fetchone())
    def _update(self, table, values, exc_notfound, id_col=None):
        """UPDATE the row identified by *id_col* (default: table.c.id) with
        *values*; raise *exc_notfound* unless exactly one row changed.

        Returns the refetched row as a dict.
        """
        if id_col is None:
            id_col = table.c.id
        query = table.update()\
            .where(id_col == values[id_col.name])\
            .values(**values)
        resultproxy = self.session.execute(query)
        if resultproxy.rowcount != 1:
            raise exc_notfound()
        # Refetch the row, for generated columns etc
        query = select([table])\
            .where(id_col == values[id_col.name])
        resultproxy = self.session.execute(query)
        return _map_col(query.columns.keys(), resultproxy.fetchone())
def _get(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = select([table])\
.where(id_col == id_)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) != 1:
raise exc_notfound()
# Map col keys to values in result
return _map_col(query.columns.keys(), results[0])
def _delete(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.delete()\
.where(id_col == id_)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# TSIG Key Methods
    def create_tsigkey(self, context, tsigkey):
        """Create a TSIG Key and install it on every existing domain.

        Raises exceptions.NotImplemented for unsupported algorithms.
        """
        if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:
            raise exceptions.NotImplemented('Unsupported algorithm')
        values = {
            'designate_id': tsigkey['id'],
            'name': tsigkey['name'],
            'algorithm': tsigkey['algorithm'],
            # PowerDNS stores the shared secret base64-encoded
            'secret': base64.b64encode(tsigkey['secret'])
        }
        self._create(tables.tsigkeys, values)
        # NOTE(kiall): Prepare and execute query to install this TSIG Key on
        #              every domain. We use a manual query here since anything
        #              else would be impossibly slow.
        query_select = select([
            tables.domains.c.id,
            "'TSIG-ALLOW-AXFR'",
            "'%s'" % tsigkey['name']]
        )
        columns = [
            tables.domain_metadata.c.domain_id,
            tables.domain_metadata.c.kind,
            tables.domain_metadata.c.content,
        ]
        query = InsertFromSelect(tables.domain_metadata, query_select,
                                 columns)
        # NOTE(kiall): A TX is required for, at the least, SQLite.
        self.session.begin()
        self.session.execute(query)
        self.session.commit()
def update_tsigkey(self, context, tsigkey):
"""Update a TSIG Key"""
values = self._get(
tables.tsigkeys,
tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
# Store a copy of the original name..
original_name = values['name']
values.update({
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
})
self._update(tables.tsigkeys, values,
id_col=tables.tsigkeys.c.designate_id,
exc_notfound=exceptions.TsigKeyNotFound)
# If the name changed, Update the necessary DomainMetadata records
if original_name != tsigkey['name']:
query = tables.domain_metadata.update()\
.where(tables.domain_metadata.c.kind == 'TSIG_ALLOW_AXFR')\
.where(tables.domain_metadata.c.content == original_name)
query.values(content=tsigkey['name'])
self.session.execute(query)
    def delete_tsigkey(self, context, tsigkey):
        """Delete a TSIG Key and its per-domain metadata entries."""
        try:
            # Delete this TSIG Key itself
            self._delete(
                tables.tsigkeys, tsigkey['id'],
                exceptions.TsigKeyNotFound,
                id_col=tables.tsigkeys.c.designate_id)
        except exceptions.TsigKeyNotFound:
            # If the TSIG Key is already gone, that's ok. We're deleting it
            # anyway, so just log and continue.
            LOG.critical(_LC('Attempted to delete a TSIG key which is '
                             'not present in the backend. ID: %s') %
                         tsigkey['id'])
            return
        # Remove the key from every domain's TSIG-ALLOW-AXFR metadata.
        query = tables.domain_metadata.delete()\
            .where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
            .where(tables.domain_metadata.c.content == tsigkey['name'])
        self.session.execute(query)
# Domain Methods
    def create_domain(self, context, domain):
        """Create a PowerDNS domain plus its TSIG/also-notify metadata.

        Runs inside a single transaction; rolled back on any failure.
        """
        try:
            self.session.begin()
            servers = self.central_service.find_servers(self.admin_context)
            domain_values = {
                'designate_id': domain['id'],
                'name': domain['name'].rstrip('.'),
                'master': servers[0]['name'].rstrip('.'),
                'type': CONF['backend:powerdns'].domain_type,
                'account': context.tenant
            }
            domain_ref = self._create(tables.domains, domain_values)
            # Install all TSIG Keys on this domain
            query = select([tables.tsigkeys.c.name])
            resultproxy = self.session.execute(query)
            values = [i for i in resultproxy.fetchall()]
            self._update_domainmetadata(domain_ref['id'], 'TSIG-ALLOW-AXFR',
                                        values)
            # Install all Also Notify's on this domain
            self._update_domainmetadata(domain_ref['id'], 'ALSO-NOTIFY',
                                        CONF['backend:powerdns'].also_notify)
        except Exception:
            # Roll back the whole creation, then re-raise the original error.
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()
    def update_domain(self, context, domain):
        """Push the domain's new TTL down to records that inherit it."""
        domain_ref = self._get(tables.domains, domain['id'],
                               exceptions.DomainNotFound,
                               id_col=tables.domains.c.designate_id)
        try:
            self.session.begin()
            # Update the Records TTLs where necessary
            query = tables.records.update()\
                .where(tables.records.c.domain_id == domain_ref['id'])
            query = query.where(tables.records.c.inherit_ttl == True)  # noqa
            query = query.values(ttl=domain['ttl'])
            self.session.execute(query)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()
    def delete_domain(self, context, domain):
        """Delete a domain together with all of its records and metadata."""
        try:
            domain_ref = self._get(tables.domains, domain['id'],
                                   exceptions.DomainNotFound,
                                   id_col=tables.domains.c.designate_id)
        except exceptions.DomainNotFound:
            # If the Domain is already gone, that's ok. We're deleting it
            # anyway, so just log and continue.
            LOG.critical(_LC('Attempted to delete a domain which is '
                             'not present in the backend. ID: %s') %
                         domain['id'])
            return
        self._delete(tables.domains, domain['id'],
                     exceptions.DomainNotFound,
                     id_col=tables.domains.c.designate_id)
        # Ensure the records are deleted
        query = tables.records.delete()\
            .where(tables.records.c.domain_id == domain_ref['id'])
        self.session.execute(query)
        # Ensure domainmetadata is deleted
        query = tables.domain_metadata.delete()\
            .where(tables.domain_metadata.c.domain_id == domain_ref['id'])
        self.session.execute(query)
# RecordSet Methods
    def create_recordset(self, context, domain, recordset):
        """Create all records of a recordset inside a single transaction.

        Each record is inserted via :meth:`create_record`; on any failure
        the whole batch is rolled back and the exception is re-raised.
        """
        try:
            self.session.begin(subtransactions=True)
            # Create all the records..
            for record in recordset.records:
                self.create_record(context, domain, recordset, record)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()
    def update_recordset(self, context, domain, recordset):
        """Replace a recordset by delete-then-recreate inside a transaction.

        # TODO(kiall): This is a total kludge. Intended as the simplest
        #              possible fix for the issue. This needs to be
        #              re-implemented correctly.
        """
        try:
            self.session.begin(subtransactions=True)
            # Delete + recreate rather than diffing the individual records.
            self.delete_recordset(context, domain, recordset)
            self.create_recordset(context, domain, recordset)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()
def delete_recordset(self, context, domain, recordset):
# Ensure records are deleted
query = tables.records.delete()\
.where(tables.records.c.designate_recordset_id == recordset['id'])
self.session.execute(query)
# Record Methods
def create_record(self, context, domain, recordset, record):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_values = {
'designate_id': record['id'],
'designate_recordset_id': record['recordset_id'],
'domain_id': domain_ref['id'],
'name': recordset['name'].rstrip('.'),
'type': recordset['type'],
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
}
self._create(tables.records, record_values)
def update_record(self, context, domain, recordset, record):
record_ref = self._get_record(record['id'])
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_ref.update({
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
})
self._update(tables.records, record_ref,
exc_notfound=exceptions.RecordNotFound)
    def delete_record(self, context, domain, recordset, record):
        """Delete the PowerDNS row backing a designate record.

        As with domains, a missing record is tolerated but logged, since
        the end state (record absent) is what the caller wants anyway.
        """
        try:
            record_ref = self._get(tables.records, record['id'],
                                   exceptions.RecordNotFound,
                                   id_col=tables.records.c.designate_id)
        except exceptions.RecordNotFound:
            # If the Record is already gone, that's ok. We're deleting it
            # anyway, so just log and continue.
            LOG.critical(_LC('Attempted to delete a record which is '
                             'not present in the backend. ID: %s') %
                         record['id'])
        else:
            self._delete(tables.records, record_ref['id'],
                         exceptions.RecordNotFound)
# Internal Methods
def _update_domainmetadata(self, domain_id, kind, values=None,
delete=True):
"""Updates a domain's metadata with new values"""
# Fetch all current metadata of the specified kind
values = values or []
query = select([tables.domain_metadata.c.content])\
.where(tables.domain_metadata.c.domain_id == domain_id)\
.where(tables.domain_metadata.c.kind == kind)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
for metadata_id, content in results:
if content not in values:
if delete:
LOG.debug('Deleting stale domain metadata: %r' %
([domain_id, kind, content],))
# Delete no longer necessary values
# We should never get a notfound here, so UnknownFailure is
# a reasonable choice.
self._delete(tables.domain_metadata, metadata_id,
exceptions.UnknownFailure)
else:
# Remove pre-existing values from the list of values to insert
values.remove(content)
# Insert new values
for value in values:
LOG.debug('Inserting new domain metadata: %r' %
([domain_id, kind, value],))
self._create(
tables.domain_metadata,
{
"domain_id": domain_id,
"kind": kind,
"content": value
})
def _is_authoritative(self, domain, recordset, record):
# NOTE(kiall): See http://doc.powerdns.com/dnssec-modes.html
if recordset['type'] == 'NS' and recordset['name'] != domain['name']:
return False
else:
return True
def _sanitize_content(self, type, content):
if type in ('CNAME', 'MX', 'SRV', 'NS', 'PTR'):
return content.rstrip('.')
if type in ('TXT', 'SPF'):
return '"%s"' % content.replace('"', '\\"')
return content
    def _get_record(self, record_id=None, domain=None, type_=None):
        """Fetch exactly one PowerDNS record row matching the filters.

        Any combination of designate record id, owning domain, and record
        type may be supplied; all provided filters are ANDed together.

        :raises exceptions.RecordNotFound: if zero rows match, and also
            (NOTE(review): arguably the wrong type) if more than one
            row matches.
        :returns: the row mapped to a dict via the module-level _map_col.
        """
        query = select([tables.records])
        if record_id:
            query = query.where(tables.records.c.designate_id == record_id)
        if type_:
            query = query.where(tables.records.c.type == type_)
        if domain:
            query = query.where(tables.records.c.domain_id == domain['id'])
        resultproxy = self.session.execute(query)
        results = resultproxy.fetchall()
        if len(results) < 1:
            raise exceptions.RecordNotFound('No record found')
        elif len(results) > 1:
            raise exceptions.RecordNotFound('Too many records found')
        else:
            return _map_col(query.columns.keys(), results[0])
| apache-2.0 |
rohanp/scikit-learn | sklearn/model_selection/tests/test_validation.py | 20 | 27961 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from test_split import MockClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve.

    The training score decays from 2 toward 1 as more samples are fitted,
    while the test score climbs from 0 toward 1, giving learning_curve a
    deterministic, easily-checked shape.
    """

    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        """Remember the training subset and how many samples it holds."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        """Score is a linear function of the fraction of data fitted."""
        fraction = float(self.train_sizes) / self.n_max_train_sizes
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        if self._is_training_data(X):
            return 2. - fraction
        return fraction

    def _is_training_data(self, X):
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit"""

    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        # One sample seen during fitting, used to recognize training data.
        self.x = None

    def _is_training_data(self, X):
        # Membership test: X is "training data" if it contains the
        # remembered sample (identity check is impossible with batches).
        return self.x in X

    def partial_fit(self, X, y=None, **params):
        # Accumulate the sample count across incremental calls and keep
        # one sample for _is_training_data.
        self.train_sizes += X.shape[0]
        self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve.

    ``score`` returns ``param`` on the training subset and ``1 - param``
    elsewhere, so the validation curve is fully determined by ``param``.
    """

    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        """Remember the fitted subset and its size."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        if self._is_training_data(X):
            return self.param
        return 1 - self.param

    def _is_training_data(self, X):
        return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Ten samples, two per class: [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
y = np.arange(10) // 2
def test_cross_val_score():
    """Smoke-test cross_val_score on dense, sparse, list and 3d inputs."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        scores = cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cross_val_score(clf, X, y.tolist())
    # An unknown scorer name must raise.
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cross_val_score, clf, X_3d, y)
def test_cross_val_score_predict_labels():
    """Label-aware CV splitters must reject a missing ``labels`` argument."""
    # Check if ValueError (when labels is None) propagates to cross_val_score
    # and cross_val_predict
    # And also check if labels is correctly passed to the cv object
    X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
    clf = SVC(kernel="linear")
    label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
                 LabelShuffleSplit()]
    for cv in label_cvs:
        assert_raise_message(ValueError,
                             "The labels parameter should not be None",
                             cross_val_score, estimator=clf, X=X, y=y, cv=cv)
        assert_raise_message(ValueError,
                             "The labels parameter should not be None",
                             cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
    """cross_val_score must pass pandas types through unchanged."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """Boolean-mask CV splits must give the same scores as index splits."""
    # test that cross_val_score works with boolean masks
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    kfold = KFold(5)
    scores_indices = cross_val_score(svm, X, y, cv=kfold)
    kfold = KFold(5)
    cv_masks = []
    for train, test in kfold.split(X, y):
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: the boolean masks were built but the plain index arrays
        # were appended, so the mask code path was never exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """Precomputed-kernel SVC must score like linear SVC and reject bad X."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params of any type (arrays, sparse, scalars, objects, callables)
    must reach the estimator's fit intact."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                          shape=(10, 1))
    P_sparse = coo_matrix(np.eye(5))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer must be called once per fold with (y_test, y_pred)."""
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # Default 3-fold CV implies three scorer invocations.
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """A non-estimator (no fit method) must raise TypeError."""
    class BrokenEstimator:
        pass
    assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
    """Accuracy (default and named) and weighted F1 on iris give the
    expected per-fold scores."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cross_val_score(clf, iris.data, iris.target,
                                scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cross_val_score(clf, iris.data, iris.target,
                                scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Default R2, named r2, negated MSE and a custom EV scorer all give
    the expected per-fold values on a fixed regression problem."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: high score / tiny p-value on real labels,
    chance-level score / large p-value on shuffled labels; sparse input
    and custom scorers agree with the dense/default runs."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = StratifiedKFold(2)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    score_label, _, pvalue_label = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = StratifiedKFold(2)
    score_label, _, pvalue_label = permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    # Imputer removes the NaNs before the classifier sees them.
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    # Imputer removes the NaNs before the classifier sees them.
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
    """Multilabel y with micro/macro/samples-averaged precision scorers."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict matches a manual fold loop and handles several
    CV strategies, sparse input, clusterers and a malformed CV."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = KFold()
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = LeaveOneOut()
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cross_val_predict(est, Xsp, y)
    # FIX: length equality is a scalar check; use assert_equal, not
    # assert_array_almost_equal, consistent with the other checks here.
    assert_equal(len(preds), len(y))
    preds = cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))

    class BadCV():
        def split(self, X, y=None, labels=None):
            # Test indices fall outside the training partition on purpose.
            for i in range(4):
                yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])

    assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
    """Smoke-test cross_val_predict on dense, sparse, list and 3d inputs."""
    clf = Ridge()
    # Smoke test
    predictions = cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    predictions = cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """cross_val_predict must pass pandas types through unchanged."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
    """Sparse matrices in fit_params must be indexed per fold correctly."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
def test_learning_curve():
    """learning_curve returns the analytic train/test score progression of
    MockImprovingEstimator without emitting warnings."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
    """learning_curve works with y=None (unsupervised estimators)."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    """verbose=1 must print a [learning_curve] progress marker to stdout."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Capture stdout; restore it in finally even if learning_curve raises.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    """Requesting incremental learning without partial_fit must raise."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    """Incremental (partial_fit) learning curve matches the analytic shape."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    """Incremental learning curve also works with y=None."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch refitting and partial_fit must yield the same learning curve
    for a single-pass estimator (n_iter=1, no shuffling)."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)
    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    """train_sizes outside (0, 1] fractions or [1, n_max] counts must raise."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.0, 1.0])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.1, 1.1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 20])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate resolved train sizes are dropped with a RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    """learning_curve with an explicit KFold splitter object.

    NOTE(review): despite the name, this no longer exercises boolean
    indices — the cv object yields integer indices.
    """
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    """validation_curve scores follow param (train) and 1-param (test)."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
    """_check_is_permutation accepts 0..n-1 and rejects gaps/duplicates."""
    p = np.arange(100)
    assert_true(_check_is_permutation(p, 100))
    # Missing one element -> not a permutation of range(100).
    assert_false(_check_is_permutation(np.delete(p, 23), 100))
    # Duplicate element -> not a permutation either.
    p[0] = 23
    assert_false(_check_is_permutation(p, 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    # Densify the sparse predictions for elementwise comparison.
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/sleekxmpp/api.py | 13 | 7436 | from sleekxmpp.xmlstream import JID
class APIWrapper(object):
    """Bind an APIRegistry to a single API name.

    Attribute access on the wrapper returns management commands of the
    underlying registry, curried so the API name no longer needs to be
    passed; item access runs an operation directly.
    """

    def __init__(self, api, name):
        self.api = api
        self.name = name
        # Make sure a settings bucket exists for this API.
        if name not in self.api.settings:
            self.api.settings[name] = {}

    def __getattr__(self, attr):
        """Curry API management commands with the API name."""
        if attr == 'name':
            return self.name
        if attr == 'settings':
            return self.api.settings[self.name]
        if attr == 'register':
            def curried(handler, op, jid=None, node=None, default=False):
                register = getattr(self.api, attr)
                return register(handler, self.name, op, jid, node, default)
            return curried
        if attr == 'register_default':
            def curried(handler, op, jid=None, node=None):
                # jid/node accepted for signature parity but not
                # forwarded: defaults are global by definition.
                return getattr(self.api, attr)(handler, self.name, op)
            return curried
        if attr in ('run', 'restore_default', 'unregister'):
            def curried(*args, **kwargs):
                return getattr(self.api, attr)(self.name, *args, **kwargs)
            return curried
        # Unknown management commands resolve to None rather than raising.
        return None

    def __getitem__(self, attr):
        """Allow ``wrapper[op](jid, node, ifrom, args)`` style execution."""
        def runner(jid=None, node=None, ifrom=None, args=None):
            return self.api.run(self.name, attr, jid, node, ifrom, args)
        return runner
class APIRegistry(object):
def __init__(self, xmpp):
self._handlers = {}
self._handler_defaults = {}
self.xmpp = xmpp
self.settings = {}
def _setup(self, ctype, op):
"""Initialize the API callback dictionaries.
:param string ctype: The name of the API to initialize.
:param string op: The API operation to initialize.
"""
if ctype not in self.settings:
self.settings[ctype] = {}
if ctype not in self._handler_defaults:
self._handler_defaults[ctype] = {}
if ctype not in self._handlers:
self._handlers[ctype] = {}
if op not in self._handlers[ctype]:
self._handlers[ctype][op] = {'global': None,
'jid': {},
'node': {}}
    def wrap(self, ctype):
        """Return a wrapper object that targets a specific API.

        :param string ctype: The API name every call will be curried with.
        """
        return APIWrapper(self, ctype)
def purge(self, ctype):
"""Remove all information for a given API."""
del self.settings[ctype]
del self._handler_defaults[ctype]
del self._handlers[ctype]
    def run(self, ctype, op, jid=None, node=None, ifrom=None, args=None):
        """Execute an API callback, based on specificity.

        The API callback that is executed is chosen based on the combination
        of the provided JID and node:

        JID   | node  | Handler
        ==============================
        Given | Given | Node handler
        Given | None  | JID handler
        None  | None  | Global handler

        A node handler is responsible for servicing a single node at a single
        JID, while a JID handler may respond for any node at a given JID, and
        the global handler will answer to any JID+node combination.

        Handlers should check that the JID ``ifrom`` is authorized to perform
        the desired action.

        :param string ctype: The name of the API to use.
        :param string op: The API operation to perform.
        :param JID jid: Optionally provide specific JID.
        :param string node: Optionally provide specific node.
        :param JID ifrom: Optionally provide the requesting JID.
        :param tuple args: Optional positional arguments to the handler.
        """
        self._setup(ctype, op)
        # Default the target JID to our own bound JID.
        if not jid:
            jid = self.xmpp.boundjid
        elif jid and not isinstance(jid, JID):
            jid = JID(jid)
        elif jid == JID(''):
            jid = self.xmpp.boundjid
        if node is None:
            node = ''
        # Normalize to bare or full JID per this API's configuration.
        if self.xmpp.is_component:
            if self.settings[ctype].get('component_bare', False):
                jid = jid.bare
            else:
                jid = jid.full
        else:
            if self.settings[ctype].get('client_bare', False):
                jid = jid.bare
            else:
                jid = jid.full
        jid = JID(jid)
        # Dispatch by decreasing specificity: node, then JID, then global.
        handler = self._handlers[ctype][op]['node'].get((jid, node), None)
        if handler is None:
            handler = self._handlers[ctype][op]['jid'].get(jid, None)
        if handler is None:
            handler = self._handlers[ctype][op].get('global', None)
        if handler:
            try:
                return handler(jid, node, ifrom, args)
            except TypeError:
                # To preserve backward compatibility, drop the ifrom
                # parameter for existing handlers that don't understand it.
                return handler(jid, node, args)
def register(self, handler, ctype, op, jid=None, node=None, default=False):
    """Register an API callback, with JID+node specificity.

    The API callback can later be executed based on the
    specificity of the provided JID+node combination.

    See :meth:`~ApiRegistry.run` for more details.

    :param func handler: The callback to register, or None to fall back
                         to the registered default for this operation.
    :param string ctype: The name of the API to use.
    :param string op: The API operation to perform.
    :param JID jid: Optionally provide specific JID.
    :param string node: Optionally provide specific node.
    :param bool default: If True, also register the handler as the
                         default handler via :meth:`register_default`.
    """
    self._setup(ctype, op)
    if jid is None and node is None:
        if handler is None:
            # Fall back to the stored default for this API type and
            # operation. The defaults table is keyed by ctype first
            # (see register_default); indexing it by op alone raises
            # KeyError.
            handler = self._handler_defaults[ctype][op]
        self._handlers[ctype][op]['global'] = handler
    elif jid is not None and node is None:
        self._handlers[ctype][op]['jid'][jid] = handler
    else:
        self._handlers[ctype][op]['node'][(jid, node)] = handler
    if default:
        self.register_default(handler, ctype, op)
def register_default(self, handler, ctype, op):
    """Install ``handler`` as the fallback handler for an operation.

    :param func handler: The default, global handler for the operation.
    :param string ctype: The name of the API to modify.
    :param string op: The API operation to use.
    """
    self._setup(ctype, op)
    defaults = self._handler_defaults
    defaults[ctype][op] = handler
def unregister(self, ctype, op, jid=None, node=None):
    """Remove an API callback.

    The callback chosen for removal is selected by the same JID+node
    specificity rules used by :meth:`~ApiRegistry.run`.

    :param string ctype: The name of the API to use.
    :param string op: The API operation to perform.
    :param JID jid: Optionally provide specific JID.
    :param string node: Optionally provide specific node.
    """
    self._setup(ctype, op)
    # Registering None clears the slot for this JID+node combination.
    self.register(None, ctype, op, jid, node)
def restore_default(self, ctype, op, jid=None, node=None):
    """Reset an API callback to its registered default handler.

    :param string ctype: The name of the API to use.
    :param string op: The API operation to perform.
    :param JID jid: Optionally provide specific JID.
    :param string node: Optionally provide specific node.
    """
    # Clear the current handler first, then re-register the stored
    # default for this API type and operation.
    self.unregister(ctype, op, jid, node)
    default = self._handler_defaults[ctype][op]
    self.register(default, ctype, op, jid, node)
| gpl-2.0 |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/Cryptodome/Util/_file_system.py | 7 | 2183 | # ===================================================================
#
# Copyright (c) 2016, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import os
def pycryptodome_filename(dir_comps, filename):
    """Return the full path of a file inside the PyCryptodome package.

    dir_comps : list of string
        Directory components within the PyCryptodome package.
        The first element must be "Cryptodome".

    filename : string
        The file name (including extension) in the target directory.
    """
    if dir_comps[0] != "Cryptodome":
        raise ValueError("Only available for modules under 'Cryptodome'")

    # This module lives in Cryptodome/Util, so the package root is one
    # directory above the directory containing this file.
    util_lib = os.path.dirname(os.path.abspath(__file__))
    root_lib = os.path.join(util_lib, "..")

    relative_comps = list(dir_comps[1:]) + [filename]
    return os.path.join(root_lib, *relative_comps)
| gpl-2.0 |
windyuuy/opera | chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/text_unittest.py | 122 | 3616 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for text_style.py."""
import unittest2 as unittest
import text as text_style
from text import TextChecker
class TextStyleTestCase(unittest.TestCase):
    """TestCase for text_style.py"""

    def assertNoError(self, lines):
        """Asserts that the specified lines has no errors."""
        # The checker reports errors through the callback below; record
        # whether it fired.
        self.had_error = False

        def error_for_test(line_number, category, confidence, message):
            """Records if an error occurs."""
            self.had_error = True

        text_style.process_file_data('', lines, error_for_test)
        self.assertFalse(self.had_error, '%s should not have any errors.' % lines)

    def assertError(self, lines, expected_line_number):
        """Asserts that the specified lines has an error."""
        self.had_error = False

        def error_for_test(line_number, category, confidence, message):
            """Checks if the expected error occurs."""
            # Only whitespace/tab errors at the expected line are valid.
            self.assertEqual(expected_line_number, line_number)
            self.assertEqual('whitespace/tab', category)
            self.had_error = True

        text_style.process_file_data('', lines, error_for_test)
        self.assertTrue(self.had_error, '%s should have an error [whitespace/tab].' % lines)

    def test_no_error(self):
        """Tests for no error cases."""
        self.assertNoError([''])
        self.assertNoError(['abc def', 'ggg'])

    def test_error(self):
        """Tests for error cases."""
        # Tabs anywhere in the content should be flagged.
        self.assertError(['2009-12-16\tKent Tamura\t<tkent@chromium.org>'], 1)
        self.assertError(['2009-12-16 Kent Tamura <tkent@chromium.org>',
                          '',
                          '\tReviewed by NOBODY.'], 3)
class TextCheckerTest(unittest.TestCase):
    """Tests TextChecker class."""

    def mock_handle_style_error(self):
        # Stand-in error handler; only its identity is checked below.
        pass

    def test_init(self):
        """Test __init__ constructor."""
        checker = TextChecker("foo.txt", self.mock_handle_style_error)
        self.assertEqual(checker.file_path, "foo.txt")
| bsd-3-clause |
SpookW/three.js | utils/exporters/blender/addons/io_three/exporter/api/mesh.py | 55 | 26672 | """
Blender API for querying mesh data. Animation data is also
handled here since Three.js associates the animation (skeletal,
morph targets) with the geometry nodes.
"""
import functools
import operator
import re

from bpy import data, types, context

from . import material, texture, animation
from . import object as object_
from .. import constants, utilities, logger, exceptions
def _mesh(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Mesh):
mesh = name
else:
mesh = data.meshes[name]
return func(mesh, *args, **kwargs)
return inner
@_mesh
def skeletal_animation(mesh, options):
    """Parse armature-driven (skeletal) animation data for a mesh.

    :param mesh:
    :param options: exporter options; ``constants.ANIMATION`` selects
                    whether POSE or REST data is parsed
    :rtype: []
    """
    logger.debug("mesh.animation(%s, %s)", mesh, options)
    armature = _armature(mesh)

    if not armature:
        logger.warning("No armature found (%s)", mesh)
        return []

    anim_type = options.get(constants.ANIMATION)
    # pose_position = armature.data.pose_position

    # Dispatch to the pose/rest animation parser chosen by the options.
    dispatch = {
        constants.POSE: animation.pose_animation,
        constants.REST: animation.rest_animation
    }

    func = dispatch[anim_type]
    # armature.data.pose_position = anim_type.upper()
    animations = func(armature, options)
    # armature.data.pose_position = pose_position

    return animations
@_mesh
def bones(mesh, options):
    """Parse the bones of the armature bound to a mesh.

    :param mesh:
    :param options: exporter options; ``constants.ANIMATION`` selects
                    whether POSE or REST bone data is used
    :rtype: [], {}  (bone list, bone index map)
    """
    logger.debug("mesh.bones(%s)", mesh)
    armature = _armature(mesh)

    if not armature:
        return [], {}

    anim_type = options.get(constants.ANIMATION)
    # pose_position = armature.data.pose_position

    if anim_type == constants.OFF:
        # No animation requested: REST data is sufficient.
        logger.info("Animation type not set, defaulting "
                    "to using REST position for the armature.")
        func = _rest_bones
        # armature.data.pose_position = "REST"
    else:
        dispatch = {
            constants.REST: _rest_bones,
            constants.POSE: _pose_bones
        }
        logger.info("Using %s for the armature", anim_type)
        func = dispatch[anim_type]
        # armature.data.pose_position = anim_type.upper()

    bones_, bone_map = func(armature)
    # armature.data.pose_position = pose_position

    return (bones_, bone_map)
@_mesh
def buffer_normal(mesh):
    """Return the flattened per-face-vertex normals for buffer geometry.

    Requires a fully triangulated mesh.

    :param mesh:
    :rtype: []
    :raises exceptions.BufferGeometryError: if a face is not a triangle
    """
    normals_ = []

    for face in mesh.tessfaces:
        vert_count = len(face.vertices)
        # Compare by value; 'is' on ints relies on CPython interning.
        if vert_count != 3:
            msg = "Non-triangulated face detected"
            raise exceptions.BufferGeometryError(msg)

        for vertex_index in face.vertices:
            normal = mesh.vertices[vertex_index].normal
            vector = (normal.x, normal.y, normal.z)
            normals_.extend(vector)

    return normals_
@_mesh
def buffer_position(mesh):
    """Return the flattened per-face-vertex positions for buffer geometry.

    Requires a fully triangulated mesh.

    :param mesh:
    :rtype: []
    :raises exceptions.BufferGeometryError: if a face is not a triangle
    """
    position = []

    for face in mesh.tessfaces:
        vert_count = len(face.vertices)
        # Compare by value; 'is' on ints relies on CPython interning.
        if vert_count != 3:
            msg = "Non-triangulated face detected"
            raise exceptions.BufferGeometryError(msg)

        for vertex_index in face.vertices:
            vertex = mesh.vertices[vertex_index]
            vector = (vertex.co.x, vertex.co.y, vertex.co.z)
            position.extend(vector)

    return position
@_mesh
def buffer_uv(mesh):
    """Return the flattened UVs of the first UV layer for buffer geometry.

    :param mesh:
    :rtype: []
    """
    uvs_ = []
    # Compare by value; 'is 0' relies on CPython small-int interning.
    if len(mesh.uv_layers) == 0:
        return uvs_
    elif len(mesh.uv_layers) > 1:
        # if memory serves me correctly buffer geometry
        # only uses one UV layer
        logger.warning("%s has more than 1 UV layer", mesh.name)

    for uv_data in mesh.uv_layers[0].data:
        uv_tuple = (uv_data.uv[0], uv_data.uv[1])
        uvs_.extend(uv_tuple)

    return uvs_
@_mesh
def extra_vertex_groups(mesh, patterns_string):
    """
    Returns (name, index) tuples for the extra (non-skinning) vertex
    groups matching the given patterns.

    The patterns are comma-separated where the star character can be
    used as a wildcard character sequence.

    :param mesh:
    :param patterns_string: comma-separated glob-like patterns
    :rtype: []
    """
    logger.debug("mesh._extra_vertex_groups(%s)", mesh)
    pattern_re = None
    extra_vgroups = []
    if not patterns_string.strip():
        return extra_vgroups
    armature = _armature(mesh)
    obj = object_.objects_using_mesh(mesh)[0]
    for vgroup_index, vgroup in enumerate(obj.vertex_groups):
        # Skip bone weights: a group whose name matches a pose bone is
        # a skinning group, not an "extra" group.
        vgroup_name = vgroup.name
        if armature:
            is_bone_weight = False
            for bone in armature.pose.bones:
                if bone.name == vgroup_name:
                    is_bone_weight = True
                    break
            if is_bone_weight:
                continue

        if pattern_re is None:
            # Translate user-friendly patterns to a regular expression:
            # Join the whitespace-stripped, initially comma-separated
            # entries to alternatives. Escape all characters except
            # the star and replace that one with '.*?'.
            pattern_re = '^(?:' + '|'.join(
                map(lambda entry:
                    '.*?'.join(map(re.escape, entry.strip().split('*'))),
                    patterns_string.split(','))) + ')$'

        if not re.match(pattern_re, vgroup_name):
            continue

        extra_vgroups.append((vgroup_name, vgroup_index))
    return extra_vgroups
@_mesh
def vertex_group_data(mesh, index):
    """Collect the weight of each vertex in the given vertex group.

    Vertices that are not members of the group contribute 0.0.

    :param mesh:
    :param index: vertex group index
    """
    weights = []
    for vertex in mesh.vertices:
        found = None
        # The last matching membership entry wins.
        for membership in vertex.groups:
            if membership.group == index:
                found = membership.weight
        weights.append(found or 0.0)
    return weights
@_mesh
def buffer_vertex_group_data(mesh, index):
    """Collect per-face-vertex (deindexed) weights for a vertex group.

    Vertices that are not members of the group contribute 0.0.

    :param mesh:
    :param index: vertex group index
    """
    weights = []
    for face in mesh.tessfaces:
        for vertex_index in face.vertices:
            vertex = mesh.vertices[vertex_index]
            found = None
            # The last matching membership entry wins.
            for membership in vertex.groups:
                if membership.group == index:
                    found = membership.weight
            weights.append(found or 0.0)
    return weights
@_mesh
def faces(mesh, options, material_list=None):
    """Parse the mesh faces into the flattened Three.js face array.

    Each face record is a bit mask followed by vertex indices and, for
    each enabled option, material/UV/normal/colour indices.

    :param mesh:
    :param options: exporter options (colours, uvs, materials, normals)
    :param material_list: indexed material attribute dicts
                          (Default value = None)
    :raises exceptions.NGonError: if a face is not a triangle or quad
    :raises exceptions.MaterialError: if a face's material index cannot
                                      be mapped to ``material_list``
    """
    # Log the material_list argument (previously this logged the
    # module-level materials() function by mistake).
    logger.debug("mesh.faces(%s, %s, materials=%s)",
                 mesh, options, material_list)
    material_list = material_list or []
    vertex_uv = len(mesh.uv_textures) > 0
    has_colors = len(mesh.vertex_colors) > 0
    logger.info("Has UVs = %s", vertex_uv)
    logger.info("Has vertex colours = %s", has_colors)

    opt_colours = options[constants.COLORS] and has_colors
    opt_uvs = options[constants.UVS] and vertex_uv
    opt_materials = options.get(constants.FACE_MATERIALS)
    opt_normals = options[constants.NORMALS]
    logger.debug("Vertex colours enabled = %s", opt_colours)
    logger.debug("UVS enabled = %s", opt_uvs)
    logger.debug("Materials enabled = %s", opt_materials)
    logger.debug("Normals enabled = %s", opt_normals)

    uv_indices = _uvs(mesh)[1] if opt_uvs else None
    vertex_normals = _normals(mesh) if opt_normals else None
    vertex_colours = vertex_colors(mesh) if opt_colours else None

    faces_data = []

    # Index the de-duplicated colours/normals so faces can reference
    # them by position.
    colour_indices = {}
    if vertex_colours:
        logger.debug("Indexing colours")
        for index, colour in enumerate(vertex_colours):
            colour_indices[str(colour)] = index

    normal_indices = {}
    if vertex_normals:
        logger.debug("Indexing normals")
        for index, normal in enumerate(vertex_normals):
            normal_indices[str(normal)] = index

    logger.info("Parsing %d faces", len(mesh.tessfaces))
    for face in mesh.tessfaces:
        vert_count = len(face.vertices)

        if vert_count not in (3, 4):
            logger.error("%d vertices for face %d detected",
                         vert_count,
                         face.index)
            raise exceptions.NGonError("ngons are not supported")

        mat_index = face.material_index is not None and opt_materials
        # Compare the vertex count by value, not identity.
        mask = {
            constants.QUAD: vert_count == 4,
            constants.MATERIALS: mat_index,
            constants.UVS: False,
            constants.NORMALS: False,
            constants.COLORS: False
        }

        face_data = []
        face_data.extend([v for v in face.vertices])

        if mask[constants.MATERIALS]:
            # Map the Blender material slot to its exported position.
            for mat_index, mat in enumerate(material_list):
                if mat[constants.DBG_INDEX] == face.material_index:
                    face_data.append(mat_index)
                    break
            else:
                error = ("Could not map the material index "
                         "for face %d" % face.index)
                raise exceptions.MaterialError(error)

        if uv_indices:
            for index, uv_layer in enumerate(uv_indices):
                layer = mesh.tessface_uv_textures[index]
                for uv_data in layer.data[face.index].uv:
                    uv_tuple = (uv_data[0], uv_data[1])
                    uv_index = uv_layer[str(uv_tuple)]
                    face_data.append(uv_index)
            mask[constants.UVS] = True

        if vertex_normals:
            for vertex in face.vertices:
                normal = mesh.vertices[vertex].normal
                normal = (normal.x, normal.y, normal.z)
                face_data.append(normal_indices[str(normal)])
            mask[constants.NORMALS] = True

        if vertex_colours:
            colours = mesh.tessface_vertex_colors.active.data[face.index]
            for each in (colours.color1, colours.color2, colours.color3):
                each = utilities.rgb2int(each)
                face_data.append(colour_indices[str(each)])
            mask[constants.COLORS] = True

            # Quads carry a fourth corner colour.
            if mask[constants.QUAD]:
                colour = utilities.rgb2int(colours.color4)
                face_data.append(colour_indices[str(colour)])

        face_data.insert(0, utilities.bit_mask(mask))
        faces_data.extend(face_data)

    return faces_data
@_mesh
def morph_targets(mesh, options):
    """Parse morph target (shape) vertex data across the scene frames.

    Samples the mesh at every ``FRAME_STEP``-th frame of the scene and
    returns one vertex buffer per sampled frame, or an empty list when
    the geometry never changes between frames.

    :param mesh:
    :param options:
    """
    logger.debug("mesh.morph_targets(%s, %s)", mesh, options)
    obj = object_.objects_using_mesh(mesh)[0]
    original_frame = context.scene.frame_current
    frame_step = options.get(constants.FRAME_STEP, 1)
    scene_frames = range(context.scene.frame_start,
                         context.scene.frame_end+1,
                         frame_step)

    morphs = []
    for frame in scene_frames:
        logger.info("Processing data at frame %d", frame)
        context.scene.frame_set(frame, 0.0)
        morphs.append([])
        vertices_ = object_.extract_mesh(obj, options).vertices[:]

        for vertex in vertices_:
            morphs[-1].extend([vertex.co.x, vertex.co.y, vertex.co.z])

    # Restore the scene to the frame it was on before sampling.
    context.scene.frame_set(original_frame, 0.0)
    morphs_detected = False
    for index, each in enumerate(morphs):
        # Compare by value; 'is 0' relies on CPython int interning.
        if index == 0:
            continue
        morphs_detected = morphs[index-1] != each
        if morphs_detected:
            logger.info("Valid morph target data detected")
            break
    else:
        logger.info("No valid morph data detected")
        return []

    manifest = []
    for index, morph in enumerate(morphs):
        manifest.append({
            constants.NAME: 'animation_%06d' % index,
            constants.VERTICES: morph
        })

    return manifest
@_mesh
def materials(mesh, options):
    """Parse the material attributes used by the mesh's faces.

    Only materials actually referenced by at least one face are
    exported.

    :param mesh:
    :param options: exporter options (maps, colour mixing, colours)
    """
    logger.debug("mesh.materials(%s, %s)", mesh, options)
    # Collect only the material slots referenced by faces.
    indices = []
    for face in mesh.tessfaces:
        if face.material_index not in indices:
            indices.append(face.material_index)
    material_sets = [(mesh.materials[index], index) for index in indices]
    materials_ = []

    maps = options.get(constants.MAPS)
    mix = options.get(constants.MIX_COLORS)
    use_colors = options.get(constants.COLORS)
    logger.info("Colour mix is set to %s", mix)
    logger.info("Vertex colours set to %s", use_colors)

    for mat, index in material_sets:
        try:
            dbg_color = constants.DBG_COLORS[index]
        except IndexError:
            # More materials than debug colours; reuse the first one.
            dbg_color = constants.DBG_COLORS[0]

        logger.info("Compiling attributes for %s", mat.name)
        attributes = {
            constants.COLOR_AMBIENT: material.ambient_color(mat),
            constants.COLOR_EMISSIVE: material.emissive_color(mat),
            constants.SHADING: material.shading(mat),
            constants.OPACITY: material.opacity(mat),
            constants.TRANSPARENT: material.transparent(mat),
            constants.VISIBLE: material.visible(mat),
            constants.WIREFRAME: material.wireframe(mat),
            constants.BLENDING: material.blending(mat),
            constants.DEPTH_TEST: material.depth_test(mat),
            constants.DEPTH_WRITE: material.depth_write(mat),
            constants.DBG_NAME: mat.name,
            constants.DBG_COLOR: dbg_color,
            constants.DBG_INDEX: index
        }

        if use_colors:
            colors = material.use_vertex_colors(mat)
            attributes[constants.VERTEX_COLORS] = colors

        # Export a diffuse colour unless vertex colours fully replace it.
        if (use_colors and mix) or (not use_colors):
            colors = material.diffuse_color(mat)
            attributes[constants.COLOR_DIFFUSE] = colors

        if attributes[constants.SHADING] == constants.PHONG:
            logger.info("Adding specular attributes")
            attributes.update({
                constants.SPECULAR_COEF: material.specular_coef(mat),
                constants.COLOR_SPECULAR: material.specular_color(mat)
            })

        if mesh.show_double_sided:
            logger.info("Double sided is on")
            attributes[constants.DOUBLE_SIDED] = True

        materials_.append(attributes)

        if not maps:
            continue

        # Merge in any texture map attributes found on the material.
        diffuse = _diffuse_map(mat)
        if diffuse:
            logger.info("Diffuse map found")
            attributes.update(diffuse)

        light = _light_map(mat)
        if light:
            logger.info("Light map found")
            attributes.update(light)

        specular = _specular_map(mat)
        if specular:
            logger.info("Specular map found")
            attributes.update(specular)

        # Normal/bump maps are only attached for Phong shading.
        if attributes[constants.SHADING] == constants.PHONG:
            normal = _normal_map(mat)
            if normal:
                logger.info("Normal map found")
                attributes.update(normal)

            bump = _bump_map(mat)
            if bump:
                logger.info("Bump map found")
                attributes.update(bump)

    return materials_
@_mesh
def normals(mesh):
    """Return the de-duplicated vertex normals flattened into one list.

    :param mesh:
    :rtype: []
    """
    logger.debug("mesh.normals(%s)", mesh)
    flattened = [component
                 for vector in _normals(mesh)
                 for component in vector]
    return flattened
@_mesh
def skin_weights(mesh, bone_map, influences):
    """Return the skinning weight stream for the mesh.

    :param mesh:
    :param bone_map: mapping of armature bone index to exported index
    :param influences: number of bone influences per vertex
    """
    logger.debug("mesh.skin_weights(%s)", mesh)
    # Array index 1 selects the weight component of the skinning data.
    return _skinning_data(mesh, bone_map, influences, 1)
@_mesh
def skin_indices(mesh, bone_map, influences):
    """Return the skinning bone-index stream for the mesh.

    :param mesh:
    :param bone_map: mapping of armature bone index to exported index
    :param influences: number of bone influences per vertex
    """
    logger.debug("mesh.skin_indices(%s)", mesh)
    # Array index 0 selects the bone-index component of the skinning data.
    return _skinning_data(mesh, bone_map, influences, 0)
@_mesh
def texture_registration(mesh):
    """Register all texture maps used by the mesh's materials.

    Returns a mapping of hashed file path to a record containing the
    texture's ``file_path``, ``file_name`` and the list of map types
    (diffuse, specular, ...) that reference it.

    :param mesh:
    """
    logger.debug("mesh.texture_registration(%s)", mesh)
    materials_ = mesh.materials or []
    registration = {}

    # Pairs of (map key, accessor) for each supported texture map type.
    funcs = (
        (constants.MAP_DIFFUSE, material.diffuse_map),
        (constants.SPECULAR_MAP, material.specular_map),
        (constants.LIGHT_MAP, material.light_map),
        (constants.BUMP_MAP, material.bump_map),
        (constants.NORMAL_MAP, material.normal_map)
    )

    def _registration(file_path, file_name):
        """Create a fresh registration record for a texture file.

        :param file_path:
        :param file_name:
        """
        return {
            'file_path': file_path,
            'file_name': file_name,
            'maps': []
        }

    logger.info("found %d materials", len(materials_))

    for mat in materials_:
        for (key, func) in funcs:
            tex = func(mat)
            if tex is None:
                continue

            logger.info("%s has texture %s", key, tex.name)
            file_path = texture.file_path(tex)
            file_name = texture.file_name(tex)

            # Reuse the record when several maps share one texture file.
            reg = registration.setdefault(
                utilities.hash(file_path),
                _registration(file_path, file_name))

            reg["maps"].append(key)

    return registration
@_mesh
def uvs(mesh):
    """Return each UV layer's coordinates flattened into one list per layer.

    :param mesh:
    :rtype: []
    """
    logger.debug("mesh.uvs(%s)", mesh)
    layers = []
    for layer in _uvs(mesh)[0]:
        flattened = []
        layers.append(flattened)
        logger.info("Parsing UV layer %d", len(layers))
        for pair in layer:
            flattened.extend(pair)
    return layers
@_mesh
def vertex_colors(mesh):
    """Return the distinct vertex colours of the active colour layer.

    Returns None when the mesh has no vertex colour data.

    :param mesh:
    """
    logger.debug("mesh.vertex_colors(%s)", mesh)
    found = []

    try:
        colour_data = mesh.tessface_vertex_colors.active.data
    except AttributeError:
        # No active colour layer on this mesh.
        logger.info("No vertex colours found")
        return

    for face in mesh.tessfaces:
        face_colours = (colour_data[face.index].color1,
                        colour_data[face.index].color2,
                        colour_data[face.index].color3,
                        colour_data[face.index].color4)

        for colour in face_colours:
            value = utilities.rgb2int((colour.r, colour.g, colour.b))

            if value not in found:
                found.append(value)

    return found
@_mesh
def vertices(mesh):
    """Return all vertex coordinates flattened to [x, y, z, x, y, z, ...].

    :param mesh:
    :rtype: []
    """
    logger.debug("mesh.vertices(%s)", mesh)
    coords = []

    for vertex in mesh.vertices:
        position = vertex.co
        coords.extend((position.x, position.y, position.z))

    return coords
def _normal_map(mat):
    """Build the normal-map attribute dict for a material, or None.

    :param mat:
    """
    tex = material.normal_map(mat)
    if tex is None:
        return

    logger.info("Found normal texture map %s", tex.name)
    return {
        constants.MAP_NORMAL: texture.file_name(tex),
        constants.MAP_NORMAL_FACTOR: material.normal_scale(mat),
        constants.MAP_NORMAL_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_NORMAL_WRAP: texture.wrap(tex),
        constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
    }
def _bump_map(mat):
    """Build the bump-map attribute dict for a material, or None.

    :param mat:
    """
    tex = material.bump_map(mat)
    if tex is None:
        return

    logger.info("Found bump texture map %s", tex.name)
    return {
        constants.MAP_BUMP: texture.file_name(tex),
        constants.MAP_BUMP_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_BUMP_WRAP: texture.wrap(tex),
        constants.MAP_BUMP_REPEAT: texture.repeat(tex),
        constants.MAP_BUMP_SCALE: material.bump_scale(mat),
    }
def _specular_map(mat):
    """Build the specular-map attribute dict for a material, or None.

    :param mat:
    """
    tex = material.specular_map(mat)
    if tex is None:
        return

    logger.info("Found specular texture map %s", tex.name)
    return {
        constants.MAP_SPECULAR: texture.file_name(tex),
        constants.MAP_SPECULAR_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
        constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
    }
def _light_map(mat):
    """Build the light-map attribute dict for a material, or None.

    :param mat:
    """
    tex = material.light_map(mat)
    if tex is None:
        return

    logger.info("Found light texture map %s", tex.name)
    return {
        constants.MAP_LIGHT: texture.file_name(tex),
        constants.MAP_LIGHT_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_LIGHT_WRAP: texture.wrap(tex),
        constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
    }
def _diffuse_map(mat):
    """Build the diffuse-map attribute dict for a material, or None.

    :param mat:
    """
    tex = material.diffuse_map(mat)
    if tex is None:
        return

    logger.info("Found diffuse texture map %s", tex.name)
    return {
        constants.MAP_DIFFUSE: texture.file_name(tex),
        constants.MAP_DIFFUSE_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
        constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
    }
def _normals(mesh):
"""
:param mesh:
:rtype: []
"""
vectors = []
vectors_ = {}
for face in mesh.tessfaces:
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
str_vec = str(vector)
try:
vectors_[str_vec]
except KeyError:
vectors.append(vector)
vectors_[str_vec] = True
return vectors
def _uvs(mesh):
"""
:param mesh:
:rtype: [[], ...], [{}, ...]
"""
uv_layers = []
uv_indices = []
for layer in mesh.uv_layers:
uv_layers.append([])
uv_indices.append({})
index = 0
for uv_data in layer.data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uv_key = str(uv_tuple)
try:
uv_indices[-1][uv_key]
except KeyError:
uv_indices[-1][uv_key] = index
uv_layers[-1].append(uv_tuple)
index += 1
return uv_layers, uv_indices
def _armature(mesh):
    """Find the armature modifying the first object that uses this mesh.

    Returns None when the object has no armature.

    :param mesh:
    """
    obj = object_.objects_using_mesh(mesh)[0]
    armature = obj.find_armature()
    if armature:
        logger.info("Found armature %s for %s", armature.name, obj.name)
    else:
        logger.info("Found no armature for %s", obj.name)
    return armature
def _skinning_data(mesh, bone_map, influences, array_index):
    """Extract per-vertex skinning indices or weights.

    For every vertex the ``influences`` strongest bone memberships are
    emitted; missing influences are padded with 0.

    :param mesh:
    :param bone_map: mapping of armature bone index to exported index
    :param influences: number of bone influences per vertex
    :param array_index: 0 to emit bone indices, 1 to emit weights
    """
    armature = _armature(mesh)
    manifest = []
    if not armature:
        return manifest

    obj = object_.objects_using_mesh(mesh)[0]
    logger.debug("Skinned object found %s", obj.name)

    for vertex in mesh.vertices:
        bone_array = []
        for group in vertex.groups:
            bone_array.append((group.group, group.weight))

        # Strongest influences first.
        bone_array.sort(key=operator.itemgetter(1), reverse=True)

        for index in range(influences):
            if index >= len(bone_array):
                manifest.append(0)
                continue
            name = obj.vertex_groups[bone_array[index][0]].name
            for bone_index, bone in enumerate(armature.pose.bones):
                if bone.name != name:
                    continue
                # Compare by value; 'is 0' relies on int interning.
                if array_index == 0:
                    entry = bone_map.get(bone_index, -1)
                else:
                    entry = bone_array[index][1]
                manifest.append(entry)
                break
            else:
                # Vertex group does not match any pose bone.
                manifest.append(0)

    return manifest
def _pose_bones(armature):
    """Parse the armature's bones in their current POSE position.

    :param armature:
    :rtype: [], {}  (bone list, bone index map)
    """
    bones_ = []
    bone_map = {}
    bone_count = 0

    armature_matrix = armature.matrix_world
    for bone_count, pose_bone in enumerate(armature.pose.bones):
        armature_bone = pose_bone.bone
        bone_index = None

        if armature_bone.parent is None:
            # Root bone: world-space transform, parent index -1.
            bone_matrix = armature_matrix * armature_bone.matrix_local
            bone_index = -1
        else:
            parent_bone = armature_bone.parent
            parent_matrix = armature_matrix * parent_bone.matrix_local
            bone_matrix = armature_matrix * armature_bone.matrix_local
            # Express the bone matrix relative to its parent.
            bone_matrix = parent_matrix.inverted() * bone_matrix
            # Locate the parent's position in the pose bone list.
            bone_index = index = 0
            for pose_parent in armature.pose.bones:
                armature_parent = pose_parent.bone.name
                if armature_parent == parent_bone.name:
                    bone_index = index
                index += 1

        bone_map[bone_count] = bone_count

        pos, rot, scl = bone_matrix.decompose()
        # Axes are swapped (y<->z, with negation) below — presumably to
        # convert Blender's coordinate system to Three.js's; confirm
        # against the rest of the exporter.
        bones_.append({
            constants.PARENT: bone_index,
            constants.NAME: armature_bone.name,
            constants.POS: (pos.x, pos.z, -pos.y),
            constants.ROTQ: (rot.x, rot.z, -rot.y, rot.w),
            constants.SCL: (scl.x, scl.z, scl.y)
        })

    return bones_, bone_map
def _rest_bones(armature):
    """Parse the armature's deforming bones in their REST position.

    Non-deforming bones are skipped; ``bone_map`` maps the absolute bone
    index to the index among exported (deforming) bones.

    :param armature:
    :rtype: [], {}  (bone list, bone index map)
    """
    bones_ = []
    bone_map = {}
    bone_count = 0
    bone_index_rel = 0

    for bone in armature.data.bones:
        logger.info("Parsing bone %s", bone.name)

        if not bone.use_deform:
            logger.debug("Ignoring bone %s at: %d",
                         bone.name, bone_index_rel)
            continue

        if bone.parent is None:
            # Root bone: absolute head position, parent index -1.
            bone_pos = bone.head_local
            bone_index = -1
        else:
            # Child bone: head position relative to the parent's head.
            bone_pos = bone.head_local - bone.parent.head_local
            bone_index = 0
            index = 0
            for parent in armature.data.bones:
                if parent.name == bone.parent.name:
                    bone_index = bone_map.get(index)
                index += 1

        bone_world_pos = armature.matrix_world * bone_pos
        # Axes swapped (y<->z, with negation) — presumably Blender to
        # Three.js coordinate conversion; confirm against the exporter.
        x_axis = bone_world_pos.x
        y_axis = bone_world_pos.z
        z_axis = -bone_world_pos.y

        logger.debug("Adding bone %s at: %s, %s",
                     bone.name, bone_index, bone_index_rel)
        bone_map[bone_count] = bone_index_rel
        bone_index_rel += 1
        # @TODO: the rotq probably should not have these
        # hard coded values
        bones_.append({
            constants.PARENT: bone_index,
            constants.NAME: bone.name,
            constants.POS: (x_axis, y_axis, z_axis),
            constants.ROTQ: (0, 0, 0, 1)
        })

        bone_count += 1

    return (bones_, bone_map)
| mit |
axiom-data-science/paegan | paegan/utils/asagreatcircle.py | 3 | 3265 | import math
from paegan.external.greatcircle import GreatCircle
import numpy as np
class AsaGreatCircle(object):
    """Great-circle helpers built on Vincenty's formulae (GreatCircle)."""

    @classmethod
    def great_circle(cls, **kwargs):
        """Compute the destination of travel along a great circle.

        Named arguments:

            distance = distance to traveled
            azimuth = angle, in DECIMAL DEGREES of HEADING from NORTH
            start_point = Location4D object representing the starting point
            rmajor = radius of earth's major axis. default=6378137.0 (WGS84)
            rminor = radius of earth's minor axis. default=6356752.3142 (WGS84)

        Returns a dictionary with:

            'latitude' in decimal degrees
            'longitude' in decimal degrees
            'reverse_azimuth' in decimal degrees
        """
        distance = kwargs.pop('distance')
        azimuth = kwargs.pop('azimuth')
        starting = kwargs.pop('start_point')
        rmajor = kwargs.pop('rmajor', 6378137.0)
        rminor = kwargs.pop('rminor', 6356752.3142)
        # Flattening of the reference ellipsoid.
        f = (rmajor - rminor) / rmajor

        lat_result, lon_result, angle_result = GreatCircle.vinc_pt(
            f, rmajor,
            math.radians(starting.latitude),
            math.radians(starting.longitude),
            math.radians(azimuth),
            distance)

        return {'latitude': math.degrees(lat_result),
                'longitude': math.degrees(lon_result),
                'reverse_azimuth': math.degrees(angle_result)}

    @classmethod
    def great_distance(cls, **kwargs):
        """Compute distance and azimuths between two points.

        Named arguments:

            start_point = Location4D object representing start point
            end_point = Location4D object representing end point
            start_lats/start_lons/end_lats/end_lons = array-like
                coordinates in decimal degrees (used when no points
                are given; processed element-wise with numpy)
            rmajor = radius of earth's major axis. default=6378137.0 (WGS84)
            rminor = radius of earth's minor axis. default=6356752.3142 (WGS84)

        Returns a dictionary with:

            'distance' in meters
            'azimuth' in decimal degrees
            'reverse_azimuth' in decimal degrees
        """
        start_point = kwargs.pop('start_point', None)
        end_point = kwargs.pop('end_point', None)
        # Use 'is None' rather than '== None': equality can be
        # overloaded (and numpy broadcasts '==' element-wise).
        if start_point is None and end_point is None:
            start_lat = kwargs.pop("start_lats")
            start_lon = kwargs.pop("start_lons")
            end_lat = kwargs.pop("end_lats")
            end_lon = kwargs.pop("end_lons")
        rmajor = kwargs.pop('rmajor', 6378137.0)
        rminor = kwargs.pop('rminor', 6356752.3142)
        f = (rmajor - rminor) / rmajor

        if start_point is not None and end_point is not None:
            distance, angle, reverse_angle = GreatCircle.vinc_dist(
                f, rmajor,
                math.radians(start_point.latitude),
                math.radians(start_point.longitude),
                math.radians(end_point.latitude),
                math.radians(end_point.longitude))
        else:
            # NOTE(review): if exactly one of start_point/end_point is
            # given, the arrays above were never read and this branch
            # fails — callers must supply both points or neither.
            vector_dist = np.vectorize(GreatCircle.vinc_dist)
            distance, angle, reverse_angle = vector_dist(
                f, rmajor,
                np.radians(start_lat), np.radians(start_lon),
                np.radians(end_lat), np.radians(end_lon))

        return {'distance': distance,
                'azimuth': np.degrees(angle),
                'reverse_azimuth': np.degrees(reverse_angle)}
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.