repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
mdimjasevic/debile | debile/master/interface.py | 4 | 13878 | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2014-2015 Clement Schreiner <clement@mux.me>
# Copyright (c) 2015 Lucas Kanashiro <kanashiro.duarte@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import functools
import logging
import threading
from datetime import datetime, timedelta

from debian.debian_support import Version

from debile.master.keyrings import import_pgp, import_ssl, clean_ssl_keyring
from debile.master.orm import (Person, Builder, Suite, Component, Arch, Check,
                               Group, GroupSuite, Source, Binary, Job)
from debile.master.utils import emit
NAMESPACE = threading.local()
def generic_method(fn):
def _(*args, **kwargs):
try:
return fn(*args, **kwargs)
except:
logger = logging.getLogger('debile')
logger.debug("Caught exception when processing xmlrpc request.", exc_info=True)
raise
return _
def builder_method(fn):
def _(*args, **kwargs):
if not NAMESPACE.machine:
raise Exception("You can't do that")
try:
return fn(*args, **kwargs)
except:
logger = logging.getLogger('debile')
logger.debug("Caught exception when processing xmlrpc request.", exc_info=True)
raise
return _
def user_method(fn):
def _(*args, **kwargs):
if not NAMESPACE.user:
raise Exception("You can't do that")
try:
return fn(*args, **kwargs)
except:
logger = logging.getLogger('debile')
logger.debug("Caught exception when processing xmlrpc request.", exc_info=True)
raise
return _
class DebileMasterInterface(object):
"""
This is the exposed interface for the builders. Code enhancing the server
should likely go here, unless you know what you're doing.
"""
shutdown_request = False
def __init__(self, ssl_keyring=None, pgp_keyring=None):
self.ssl_keyring = ssl_keyring
self.pgp_keyring = pgp_keyring
# Simple stuff.
@builder_method
def builder_whoami(self):
"""
ID check
"""
return NAMESPACE.machine.name
@user_method
def user_whoami(self):
"""
ID check
"""
return NAMESPACE.user.name
# The following trio of methods handle the job control.
@builder_method
def get_next_job(self, suites, components, arches, checks):
NAMESPACE.machine.last_ping = datetime.utcnow()
if self.__class__.shutdown_request:
return None
arches = [x for x in arches if x not in ["source", "all"]]
job = NAMESPACE.session.query(Job).join(Job.source).join(Source.group_suite).filter(
~Job.depedencies.any(),
Job.dose_report == None,
Job.assigned_at == None,
Job.finished_at == None,
Job.failed.is_(None),
GroupSuite.suite.has(Suite.name.in_(suites)),
Source.component.has(Component.name.in_(components)),
(Job.arch.has(Arch.name.in_(arches)) |
(Job.arch.has(Arch.name.in_(["source", "all"])) &
Source.affinity.has(Arch.name.in_(arches)))),
Job.check.has(Check.name.in_(checks)),
).order_by(
Job.assigned_count.asc(),
Source.uploaded_at.asc(),
).first()
if job is None:
return None
job.assigned_count += 1
job.assigned_at = datetime.utcnow()
job.builder = NAMESPACE.machine
emit('start', 'job', job.debilize())
return job.debilize()
@builder_method
def close_job(self, job_id, failed):
job = NAMESPACE.session.query(Job).get(job_id)
job.finished_at = datetime.utcnow()
emit('complete', 'job', job.debilize())
return True
@builder_method
def forfeit_job(self, job_id):
job = NAMESPACE.session.query(Job).get(job_id)
job.assigned_at = None
job.builder = None
emit('abort', 'job', job.debilize())
return True
# Useful methods below.
@generic_method
def get_group(self, group_id):
return NAMESPACE.session.query(Group).get(group_id).debilize()
@generic_method
def get_source(self, source_id):
return NAMESPACE.session.query(Source).get(source_id).debilize()
@generic_method
def get_binary(self, binary_id):
return NAMESPACE.session.query(Binary).get(binary_id).debilize()
@generic_method
def get_job(self, job_id):
return NAMESPACE.session.query(Job).get(job_id).debilize()
# Creating builders/users
@user_method
def create_builder(self, name, pgp, ssl=None, ip=None):
"""
:param str name: name of the builder
:param str pgp: path to the pgp public keyfile
:param str ssl: path to the ssl certificate
:param str ip: ip address of the builder
"""
if NAMESPACE.session.query(Builder).filter_by(name=name).first():
raise ValueError("Slave already exists.")
pgp = import_pgp(self.pgp_keyring, pgp)
if ssl is not None:
ssl = import_ssl(self.ssl_keyring, ssl, name)
b = Builder(name=name, maintainer=NAMESPACE.user, pgp=pgp, ssl=ssl,
last_ping=datetime.utcnow())
elif ip is not None:
b = Builder(name=name, maintainer=NAMESPACE.user, pgp=pgp, ip=ip,
last_ping=datetime.utcnow())
else:
raise ValueError("Need either ssl certificate or ip address")
NAMESPACE.session.add(b)
emit('create', 'slave', b.debilize())
return b.debilize()
@user_method
def update_builder_keys(self, name, pgp, ssl):
if self.ssl_keyring is None:
# TODO
raise Exception
builder = NAMESPACE.session.query(Builder).filter_by(name=name).first()
if not builder:
raise ValueError("No builder with name %s." % name)
builder.pgp = import_pgp(self.pgp_keyring, pgp)
builder.ssl = import_ssl(self.ssl_keyring, ssl, builder.name)
clean_ssl_keyring(self.ssl_keyring, NAMESPACE.session)
return builder.debilize()
@user_method
def disable_builder(self, name):
if self.ssl_keyring is None:
# TODO
raise Exception
builder = NAMESPACE.session.query(Builder).filter_by(name=name).first()
if not builder:
raise ValueError("No builder with name %s." % name)
builder.pgp = "0000000000000000DEADBEEF0000000000000000"
builder.ssl = "0000000000000000DEADBEEF0000000000000000"
clean_ssl_keyring(self.ssl_keyring, NAMESPACE.session)
return builder.debilize()
@user_method
def create_user(self, name, email, pgp, ssl=None, ip=None):
if NAMESPACE.session.query(Person).filter_by(email=email).first():
raise ValueError("User already exists.")
pgp = import_pgp(self.pgp_keyring, pgp)
if ssl is not None:
ssl = import_ssl(self.ssl_keyring, ssl, name, email)
p = Person(name=name, email=email, pgp=pgp, ssl=ssl, ip=ip)
NAMESPACE.session.add(p)
emit('create', 'user', p.debilize())
return p.debilize()
@user_method
def update_user_keys(self, email, pgp, ssl):
if self.ssl_keyring is None:
# TODO
raise Exception
user = NAMESPACE.session.query(Person).filter_by(email=email).first()
if not user:
raise ValueError("No user with email %s." % email)
user.pgp = import_pgp(self.pgp_keyring, pgp)
user.ssl = import_ssl(self.ssl_keyring, ssl, user.name, user.email)
clean_ssl_keyring(self.ssl_keyring, NAMESPACE.session)
return user.debilize()
@user_method
def disable_user(self, email):
if self.ssl_keyring is None:
# TODO
raise Exception
user = NAMESPACE.session.query(Builder).filter_by(email=email).first()
if not user:
raise ValueError("No user with email %s." % email)
user.pgp = "0000000000000000DEADBEEF0000000000000000"
user.ssl = "0000000000000000DEADBEEF0000000000000000"
clean_ssl_keyring(self.ssl_keyring, NAMESPACE.session)
return user.debilize()
# Re-run jobs
@user_method
def rerun_job(self, job_id):
job = NAMESPACE.session.query(Job).get(job_id)
if not job:
raise ValueError("No job with id %s." % job_id)
if any(job.built_binaries):
raise ValueError("Can not re-run a successfull build job.")
versions = NAMESPACE.session.query(Source.version).filter(
Source.group_suite == job.source.group_suite,
Source.name == job.source.name,
)
max_version = max([x[0] for x in versions], key=Version)
if job.source.version != max_version:
raise ValueError("Can not re-run a job for a superseeded source.")
job.failed = None
job.builder = None
job.assigned_at = None
job.finished_at = None
return job.debilize()
@user_method
def rerun_check(self, name):
check = NAMESPACE.session.query(Check).filter_by(name=name).one()
if check.build:
raise ValueError("Can not re-run a build check.")
jobs = NAMESPACE.session.query(Job).filter(
Job.check == check
)
for job in jobs:
versions = NAMESPACE.session.query(Source.version).filter(
Source.group_suite == job.source.group_suite,
Source.name == job.source.name,
)
max_version = max([x[0] for x in versions], key=Version)
if job.source.version != max_version:
continue
job.failed = None
job.builder = None
job.assigned_at = None
job.finished_at = None
@user_method
def retry_failed(self):
cutoff = datetime.utcnow() - timedelta(hours=1)
jobs = NAMESPACE.session.query(Job).filter(
~Job.built_binaries.any(),
Job.check.has(Check.build is True),
Job.finished_at != None,
Job.finished_at < cutoff,
)
for job in jobs:
versions = NAMESPACE.session.query(Source.version).filter(
Source.group_suite == job.source.group_suite,
Source.name == job.source.name
)
max_version = max([x[0] for x in versions], key=Version)
if job.source.version != max_version:
continue
job.failed = None
job.builder = None
job.assigned_at = None
job.finished_at = None
@user_method
def set_check(self, check, *args):
is_source = True if 'source' in args else False
is_binary = True if 'binary' in args else False
is_build = True if 'build' in args else False
check_query = NAMESPACE.session.query(Check).filter_by(name=check)
if check_query.count() > 0:
check = check_query.one()
else:
check = Check(name=check)
check.source = is_source
check.binary = is_binary
check.build = is_build
NAMESPACE.session.add(check)
return check.debilize()
@user_method
def enable_check(self, check, group, suite):
suite_query = NAMESPACE.session.query(Suite).filter_by(name=suite)
group_query = NAMESPACE.session.query(Group).filter_by(name=group)
if group_query.count() == 0:
raise ValueError('No group named %s' % group)
if suite_query.count() == 0:
raise ValueError('No suite name %s' % suite)
gs_query = NAMESPACE.session.query(GroupSuite).filter_by(
group=group_query.one(),
suite=suite_query.one(),
)
if (gs_query
.join(GroupSuite.checks)
.filter(Check.name == check)
.count() > 0):
raise ValueError('Check %s already setup for %s/%s' %
(check, group, suite))
check_query = NAMESPACE.session.query(Check).filter_by(name=check)
if check_query.count() == 0:
raise ValueError('Check %s does not exist' % check)
gs = gs_query.one()
gs.checks.append(check_query.one())
return 'Check %s added to %s.' % (check, gs)
@user_method
def list_checks(self, *args):
# FIXME: return an user-friendly table
# FIXME: list groups/suites the checks are enabled for
checks_query = NAMESPACE.session.query(Check)
return [c.debilize() for c in checks_query.all()]
| mit |
naziris/HomeSecPi | venv/lib/python2.7/site-packages/jinja2/compiler.py | 623 | 61785 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, next, text_type, string_types, \
iteritems, NativeStringIO, imap
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
x = 42
def f():
if 0: dummy(x)
return f
# The getattr is necessary for pypy which does not set this attribute if
# no closure is on the function
unoptimize_before_dead_code = bool(
getattr(unoptimize_before_dead_code(), '__closure__', None))
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = CodeGenerator(environment, name, filename, stream, defer_init)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if isinstance(value, (bool, int, float, complex, range_type,
Markup) + string_types):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if not has_safe_repr(item):
return False
return True
elif isinstance(value, dict):
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class Identifiers(object):
"""Tracks the status of identifiers in frames."""
def __init__(self):
# variables that are known to be declared (probably from outer
# frames or because they are special for the frame)
self.declared = set()
# undeclared variables from outer scopes
self.outer_undeclared = set()
# names that are accessed without being explicitly declared by
# this one or any of the outer scopes. Names can appear both in
# declared and undeclared.
self.undeclared = set()
# names that are declared locally
self.declared_locally = set()
# names that are declared by parameters
self.declared_parameter = set()
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
if name in self.declared_locally or name in self.declared_parameter:
return True
return name in self.declared
def copy(self):
return deepcopy(self)
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.identifiers = Identifiers()
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# a set of actually assigned names
self.assigned_names = set()
# the parent of this frame
self.parent = parent
if parent is not None:
self.identifiers.declared.update(
parent.identifiers.declared |
parent.identifiers.declared_parameter |
parent.assigned_names
)
self.identifiers.outer_undeclared.update(
parent.identifiers.undeclared -
self.identifiers.declared
)
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv
def inspect(self, nodes):
"""Walk the node and check for identifiers. If the scope is hard (eg:
enforce on a python level) overrides from outer scopes are tracked
differently.
"""
visitor = FrameIdentifierVisitor(self.identifiers)
for node in nodes:
visitor.visit(node)
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
that may be defined with `add_special` which may occour scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x))
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class FrameIdentifierVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, identifiers):
self.identifiers = identifiers
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
elif node.ctx == 'param':
self.identifiers.declared_parameter.add(node.name)
elif node.ctx == 'load' and not \
self.identifiers.is_declared(node.name):
self.identifiers.undeclared.add(node.name)
def visit_If(self, node):
self.visit(node.test)
real_identifiers = self.identifiers
old_names = real_identifiers.declared_locally | \
real_identifiers.declared_parameter
def inner_visit(nodes):
if not nodes:
return set()
self.identifiers = real_identifiers.copy()
for subnode in nodes:
self.visit(subnode)
rv = self.identifiers.declared_locally - old_names
# we have to remember the undeclared variables of this branch
# because we will have to pull them.
real_identifiers.undeclared.update(self.identifiers.undeclared)
self.identifiers = real_identifiers
return rv
body = inner_visit(node.body)
else_ = inner_visit(node.else_ or ())
# the differences between the two branches are also pulled as
# undeclared variables
real_identifiers.undeclared.update(body.symmetric_difference(else_) -
real_identifiers.declared)
# remember those that are declared.
real_identifiers.declared_locally.update(body | else_)
def visit_Macro(self, node):
self.identifiers.declared_locally.add(node.name)
def visit_Import(self, node):
self.generic_visit(node)
self.identifiers.declared_locally.add(node.target)
def visit_FromImport(self, node):
self.generic_visit(node)
for name in node.names:
if isinstance(name, tuple):
self.identifiers.declared_locally.add(name[1])
else:
self.identifiers.declared_locally.add(name)
def visit_Assign(self, node):
"""Visit assignments in the correct order."""
self.visit(node.node)
self.visit(node.target)
def visit_For(self, node):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter)
def visit_CallBlock(self, node):
self.visit(node.call)
def visit_FilterBlock(self, node):
self.visit(node.filter)
def visit_Scope(self, node):
"""Stop visiting at scopes."""
def visit_Block(self, node):
"""Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame):
"""Return the buffer contents of the frame."""
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
else:
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False.
"""
if frame.buffer is None:
self.writeline('if 0: yield None')
else:
self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
error could occour. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_locals(self, frame):
"""Pull all the references identifiers into the local scope."""
for name in frame.identifiers.undeclared:
self.writeline('l_%s = context.resolve(%r)' % (name, name))
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def unoptimize_scope(self, frame):
"""Disable Python optimizations for the frame."""
# XXX: this is not that nice but it has no real overhead. It
# mainly works because python finds the locals before dead code
# is removed. If that breaks we have to add a dummy function
# that just accepts the arguments and does nothing.
if frame.identifiers.declared:
self.writeline('%sdummy(%s)' % (
unoptimize_before_dead_code and 'if 0: ' or '',
', '.join('l_' + name for name in frame.identifiers.declared)
))
def push_scope(self, frame, extra_vars=()):
"""This function returns all the shadowed variables in a dict
in the form name: alias and will write the required assignments
into the current scope. No indentation takes place.
This also predefines locally declared variables from the loop
body because under some circumstances it may be the case that
`extra_vars` is passed to `Frame.find_shadowed`.
"""
aliases = {}
for name in frame.find_shadowed(extra_vars):
aliases[name] = ident = self.temporary_identifier()
self.writeline('%s = l_%s' % (ident, name))
to_declare = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_declare.add('l_' + name)
if to_declare:
self.writeline(' = '.join(to_declare) + ' = missing')
return aliases
def pop_scope(self, aliases, frame):
"""Restore all aliases and delete unused variables."""
for name, alias in iteritems(aliases):
self.writeline('l_%s = %s' % (name, alias))
to_delete = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_delete.add('l_' + name)
if to_delete:
# we cannot use the del statement here because enclosed
# scopes can trigger a SyntaxError:
# a = 42; b = lambda: a; del a
self.writeline(' = '.join(to_delete) + ' = missing')
def function_scoping(self, node, frame, children=None,
find_special=True):
"""In Jinja a few statements require the help of anonymous
functions. Those are currently macros and call blocks and in
the future also recursive loops. As there is currently
technical limitation that doesn't allow reading and writing a
variable in a scope where the initial value is coming from an
outer scope, this function tries to fall back with a common
error message. Additionally the frame passed is modified so
that the argumetns are collected and callers are looked up.
This will return the modified frame.
"""
# we have to iterate twice over it, make sure that works
if children is None:
children = node.iter_child_nodes()
children = list(children)
func_frame = frame.inner()
func_frame.inspect(children)
# variables that are undeclared (accessed before declaration) and
# declared locally *and* part of an outside scope raise a template
# assertion error. Reason: we can't generate reasonable code from
# it without aliasing all the variables.
# this could be fixed in Python 3 where we have the nonlocal
# keyword or if we switch to bytecode generation
overridden_closure_vars = (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared &
(func_frame.identifiers.declared_locally |
func_frame.identifiers.declared_parameter)
)
if overridden_closure_vars:
self.fail('It\'s not possible to set and access variables '
'derived from an outer scope! (affects: %s)' %
', '.join(sorted(overridden_closure_vars)), node.lineno)
# remove variables from a closure from the frame's undeclared
# identifiers.
func_frame.identifiers.undeclared -= (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared
)
# no special variables for this scope, abort early
if not find_special:
return func_frame
func_frame.accesses_kwargs = False
func_frame.accesses_varargs = False
func_frame.accesses_caller = False
func_frame.arguments = args = ['l_' + x.name for x in node.args]
undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
func_frame.accesses_caller = True
func_frame.identifiers.add_special('caller')
args.append('l_caller')
if 'kwargs' in undeclared:
func_frame.accesses_kwargs = True
func_frame.identifiers.add_special('kwargs')
args.append('l_kwargs')
if 'varargs' in undeclared:
func_frame.accesses_varargs = True
func_frame.identifiers.add_special('varargs')
args.append('l_varargs')
return func_frame
def macro_body(self, node, frame, children=None):
    """Dump the function def of a macro or call block.

    Emits ``def macro(...):`` plus a buffered body and returns the frame
    produced by function_scoping so macro_def can read the accesses_*
    flags from it.
    """
    frame = self.function_scoping(node, frame, children)
    # macros are delayed, they never require output checks
    frame.require_output_check = False
    args = frame.arguments
    # XXX: this is an ugly fix for the loop nesting bug
    # (tests.test_old_bugs.test_loop_call_bug). This works around
    # an identifier nesting problem we have in general. It's just more
    # likely to happen in loops which is why we work around it. The
    # real solution would be "nonlocal" all the identifiers that are
    # leaking into a new python frame and might be used both unassigned
    # and assigned.
    if 'loop' in frame.identifiers.declared:
        args = args + ['l_loop=l_loop']
    self.writeline('def macro(%s):' % ', '.join(args), node)
    self.indent()
    self.buffer(frame)
    self.pull_locals(frame)
    self.blockvisit(node.body, frame)
    self.return_buffer_contents(frame)
    self.outdent()
    return frame
def macro_def(self, node, frame):
    """Dump the macro definition for the def created by macro_body.

    Writes a ``Macro(...)`` runtime-object constructor referencing the
    just-emitted ``macro`` function; *frame* must be the frame returned
    by macro_body (its accesses_* flags are serialized here).
    """
    arg_tuple = ', '.join(repr(x.name) for x in node.args)
    name = getattr(node, 'name', None)
    # a one-element tuple needs the trailing comma in the generated source
    if len(node.args) == 1:
        arg_tuple += ','
    self.write('Macro(environment, macro, %r, (%s), (' %
               (name, arg_tuple))
    for arg in node.defaults:
        self.visit(arg, frame)
        self.write(', ')
    self.write('), %r, %r, %r)' % (
        bool(frame.accesses_kwargs),
        bool(frame.accesses_varargs),
        bool(frame.accesses_caller)
    ))
def position(self, node):
    """Return a human readable position for the node.

    Formats as ``line N`` plus ``in '<template name>'`` when the
    generator knows which template it is compiling.
    """
    location = 'line %d' % node.lineno
    if self.name is None:
        return location
    return location + ' in ' + repr(self.name)
# -- Statement Visitors
def visit_Template(self, node, frame=None):
    """Compile a whole template: imports, ``root()``, and block functions.

    This is the compiler entry point; it must be called without a frame
    (the root frame is created here).
    """
    assert frame is None, 'no root frame allowed'
    eval_ctx = EvalContext(self.environment, self.name)
    from jinja2.runtime import __all__ as exported
    self.writeline('from __future__ import division')
    self.writeline('from jinja2.runtime import ' + ', '.join(exported))
    if not unoptimize_before_dead_code:
        self.writeline('dummy = lambda *x: None')
    # if we want a deferred initialization we cannot move the
    # environment into a local name
    envenv = not self.defer_init and ', environment=environment' or ''
    # do we have an extends tag at all? If not, we can save some
    # overhead by just not processing any inheritance code.
    have_extends = node.find(nodes.Extends) is not None
    # find all blocks
    for block in node.find_all(nodes.Block):
        if block.name in self.blocks:
            self.fail('block %r defined twice' % block.name, block.lineno)
        self.blocks[block.name] = block
    # find all imports and import them
    for import_ in node.find_all(nodes.ImportedName):
        if import_.importname not in self.import_aliases:
            imp = import_.importname
            self.import_aliases[imp] = alias = self.temporary_identifier()
            if '.' in imp:
                module, obj = imp.rsplit('.', 1)
                self.writeline('from %s import %s as %s' %
                               (module, obj, alias))
            else:
                self.writeline('import %s as %s' % (imp, alias))
    # add the load name
    self.writeline('name = %r' % self.name)
    # generate the root render function.
    self.writeline('def root(context%s):' % envenv, extra=1)
    # process the root
    frame = Frame(eval_ctx)
    frame.inspect(node.body)
    frame.toplevel = frame.rootlevel = True
    frame.require_output_check = have_extends and not self.has_known_extends
    self.indent()
    if have_extends:
        self.writeline('parent_template = None')
    if 'self' in find_undeclared(node.body, ('self',)):
        frame.identifiers.add_special('self')
        self.writeline('l_self = TemplateReference(context)')
    self.pull_locals(frame)
    self.pull_dependencies(node.body)
    self.blockvisit(node.body, frame)
    self.outdent()
    # make sure that the parent root is called.
    if have_extends:
        if not self.has_known_extends:
            self.indent()
            self.writeline('if parent_template is not None:')
        self.indent()
        self.writeline('for event in parent_template.'
                       'root_render_func(context):')
        self.indent()
        self.writeline('yield event')
        self.outdent(2 + (not self.has_known_extends))
    # at this point we now have the blocks collected and can visit them too.
    for name, block in iteritems(self.blocks):
        block_frame = Frame(eval_ctx)
        block_frame.inspect(block.body)
        block_frame.block = name
        self.writeline('def block_%s(context%s):' % (name, envenv),
                       block, 1)
        self.indent()
        undeclared = find_undeclared(block.body, ('self', 'super'))
        if 'self' in undeclared:
            block_frame.identifiers.add_special('self')
            self.writeline('l_self = TemplateReference(context)')
        if 'super' in undeclared:
            block_frame.identifiers.add_special('super')
            self.writeline('l_super = context.super(%r, '
                           'block_%s)' % (name, name))
        self.pull_locals(block_frame)
        self.pull_dependencies(block.body)
        self.blockvisit(block.body, block_frame)
        self.outdent()
    self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
                                               for x in self.blocks),
                   extra=1)
    # add a function that returns the debug info
    self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
                                                in self.debug_info))
def visit_Block(self, node, frame):
    """Call a block and register it for the template.

    Emits a ``for event in context.blocks[name][0](...)`` loop; the
    indirection through context.blocks is what makes inheritance work.
    """
    level = 1
    if frame.toplevel:
        # if we know that we are a child template, there is no need to
        # check if we are one
        if self.has_known_extends:
            return
        if self.extends_so_far > 0:
            self.writeline('if parent_template is None:')
            self.indent()
            level += 1
    # scoped blocks see a derived context with the current locals
    context = node.scoped and 'context.derived(locals())' or 'context'
    self.writeline('for event in context.blocks[%r][0](%s):' % (
                   node.name, context), node)
    self.indent()
    self.simple_write('event', frame)
    self.outdent(level)
def visit_Extends(self, node, frame):
    """Calls the extender.

    Emits code loading the parent template and prepending its blocks to
    context.blocks; also updates the compiler's extends bookkeeping.
    """
    if not frame.toplevel:
        self.fail('cannot use extend from a non top-level scope',
                  node.lineno)
    # if the number of extends statements in general is zero so
    # far, we don't have to add a check if something extended
    # the template before this one.
    if self.extends_so_far > 0:
        # if we have a known extends we just add a template runtime
        # error into the generated code. We could catch that at compile
        # time too, but i welcome it not to confuse users by throwing the
        # same error at different times just "because we can".
        if not self.has_known_extends:
            self.writeline('if parent_template is not None:')
            self.indent()
        self.writeline('raise TemplateRuntimeError(%r)' %
                       'extended multiple times')
        # if we have a known extends already we don't need that code here
        # as we know that the template execution will end here.
        if self.has_known_extends:
            raise CompilerExit()
        else:
            self.outdent()
    self.writeline('parent_template = environment.get_template(', node)
    self.visit(node.template, frame)
    self.write(', %r)' % self.name)
    self.writeline('for name, parent_block in parent_template.'
                   'blocks.%s():' % dict_item_iter)
    self.indent()
    self.writeline('context.blocks.setdefault(name, []).'
                   'append(parent_block)')
    self.outdent()
    # if this extends statement was in the root level we can take
    # advantage of that information and simplify the generated code
    # in the top level from this point onwards
    if frame.rootlevel:
        self.has_known_extends = True
    # and now we have one more
    self.extends_so_far += 1
def visit_Include(self, node, frame):
    """Handles includes.

    Picks get_template / select_template / get_or_select_template based
    on the (possibly constant) template expression, and wraps the lookup
    in try/except TemplateNotFound when ``ignore missing`` is set.
    """
    if node.with_context:
        self.unoptimize_scope(frame)
    if node.ignore_missing:
        self.writeline('try:')
        self.indent()
    func_name = 'get_or_select_template'
    if isinstance(node.template, nodes.Const):
        if isinstance(node.template.value, string_types):
            func_name = 'get_template'
        elif isinstance(node.template.value, (tuple, list)):
            func_name = 'select_template'
    elif isinstance(node.template, (nodes.Tuple, nodes.List)):
        func_name = 'select_template'
    self.writeline('template = environment.%s(' % func_name, node)
    self.visit(node.template, frame)
    self.write(', %r)' % self.name)
    if node.ignore_missing:
        self.outdent()
        self.writeline('except TemplateNotFound:')
        self.indent()
        self.writeline('pass')
        self.outdent()
        self.writeline('else:')
        self.indent()
    if node.with_context:
        # render with a fresh context derived from the current one
        self.writeline('for event in template.root_render_func('
                       'template.new_context(context.parent, True, '
                       'locals())):')
    else:
        # without context the pre-rendered module body stream is reused
        self.writeline('for event in template.module._body_stream:')
    self.indent()
    self.simple_write('event', frame)
    self.outdent()
    if node.ignore_missing:
        self.outdent()
def visit_Import(self, node, frame):
    """Visit regular imports (``{% import ... as x %}``).

    Binds the imported template's module object to ``l_<target>`` and,
    at toplevel, to context.vars; imports are never exported.
    """
    if node.with_context:
        self.unoptimize_scope(frame)
    self.writeline('l_%s = ' % node.target, node)
    if frame.toplevel:
        self.write('context.vars[%r] = ' % node.target)
    self.write('environment.get_template(')
    self.visit(node.template, frame)
    self.write(', %r).' % self.name)
    if node.with_context:
        self.write('make_module(context.parent, True, locals())')
    else:
        self.write('module')
    # imports are intentionally not exported, hence discard
    if frame.toplevel and not node.target.startswith('_'):
        self.writeline('context.exported_vars.discard(%r)' % node.target)
    frame.assigned_names.add(node.target)
def visit_FromImport(self, node, frame):
    """Visit named imports (``{% from ... import a, b as c %}``).

    Each requested name is fetched off the included template's module
    with a helpful undefined fallback when the attribute is missing.
    """
    self.newline(node)
    self.write('included_template = environment.get_template(')
    self.visit(node.template, frame)
    self.write(', %r).' % self.name)
    if node.with_context:
        self.write('make_module(context.parent, True)')
    else:
        self.write('module')
    var_names = []
    discarded_names = []
    for name in node.names:
        if isinstance(name, tuple):
            name, alias = name
        else:
            alias = name
        self.writeline('l_%s = getattr(included_template, '
                       '%r, missing)' % (alias, name))
        self.writeline('if l_%s is missing:' % alias)
        self.indent()
        self.writeline('l_%s = environment.undefined(%r %% '
                       'included_template.__name__, '
                       'name=%r)' %
                       (alias, 'the template %%r (imported on %s) does '
                       'not export the requested name %s' % (
                            self.position(node),
                            repr(name)
                       ), name))
        self.outdent()
        if frame.toplevel:
            var_names.append(alias)
            # underscore-prefixed names stay private, not exported
            if not alias.startswith('_'):
                discarded_names.append(alias)
        frame.assigned_names.add(alias)
    if var_names:
        if len(var_names) == 1:
            name = var_names[0]
            self.writeline('context.vars[%r] = l_%s' % (name, name))
        else:
            self.writeline('context.vars.update({%s})' % ', '.join(
                '%r: l_%s' % (name, name) for name in var_names
            ))
    if discarded_names:
        if len(discarded_names) == 1:
            self.writeline('context.exported_vars.discard(%r)' %
                           discarded_names[0])
        else:
            self.writeline('context.exported_vars.difference_'
                           'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
    """Compile a for loop, including recursive loops, ``loop`` handling,
    loop filters (``if`` on the for line) and the ``else`` clause.
    """
    # when calculating the nodes for the inner frame we have to exclude
    # the iterator contents from it
    children = node.iter_child_nodes(exclude=('iter',))
    if node.recursive:
        loop_frame = self.function_scoping(node, frame, children,
                                           find_special=False)
    else:
        loop_frame = frame.inner()
        loop_frame.inspect(children)
    # try to figure out if we have an extended loop. An extended loop
    # is necessary if the loop is in recursive mode or if the special loop
    # variable is accessed in the body.
    extended_loop = node.recursive or 'loop' in \
                    find_undeclared(node.iter_child_nodes(
                        only=('body',)), ('loop',))
    # if we don't have a recursive loop we have to find the shadowed
    # variables at that point. Because loops can be nested but the loop
    # variable is a special one we have to enforce aliasing for it.
    if not node.recursive:
        aliases = self.push_scope(loop_frame, ('loop',))
    # otherwise we set up a buffer and add a function def
    else:
        self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
        self.indent()
        self.buffer(loop_frame)
        aliases = {}
    # make sure the loop variable is a special one and raise a template
    # assertion error if a loop tries to write to loop
    if extended_loop:
        self.writeline('l_loop = missing')
        loop_frame.identifiers.add_special('loop')
    for name in node.find_all(nodes.Name):
        if name.ctx == 'store' and name.name == 'loop':
            self.fail('Can\'t assign to special loop variable '
                      'in for-loop target', name.lineno)
    self.pull_locals(loop_frame)
    if node.else_:
        # flag variable: stays 1 only if the body never ran
        iteration_indicator = self.temporary_identifier()
        self.writeline('%s = 1' % iteration_indicator)
    # Create a fake parent loop if the else or test section of a
    # loop is accessing the special loop variable and no parent loop
    # exists.
    if 'loop' not in aliases and 'loop' in find_undeclared(
       node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
        self.writeline("l_loop = environment.undefined(%r, name='loop')" %
            ("'loop' is undefined. the filter section of a loop as well "
             "as the else block don't have access to the special 'loop'"
             " variable of the current loop. Because there is no parent "
             "loop it's undefined. Happened in loop on %s" %
             self.position(node)))
    self.writeline('for ', node)
    self.visit(node.target, loop_frame)
    self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
    # if we have an extended loop and a node test, we filter in the
    # "outer frame".
    if extended_loop and node.test is not None:
        self.write('(')
        self.visit(node.target, loop_frame)
        self.write(' for ')
        self.visit(node.target, loop_frame)
        self.write(' in ')
        if node.recursive:
            self.write('reciter')
        else:
            self.visit(node.iter, loop_frame)
        self.write(' if (')
        test_frame = loop_frame.copy()
        self.visit(node.test, test_frame)
        self.write('))')
    elif node.recursive:
        self.write('reciter')
    else:
        self.visit(node.iter, loop_frame)
    if node.recursive:
        self.write(', loop_render_func, depth):')
    else:
        self.write(extended_loop and '):' or ':')
    # tests in not extended loops become a continue
    if not extended_loop and node.test is not None:
        self.indent()
        self.writeline('if not ')
        self.visit(node.test, loop_frame)
        self.write(':')
        self.indent()
        self.writeline('continue')
        self.outdent(2)
    self.indent()
    self.blockvisit(node.body, loop_frame)
    if node.else_:
        self.writeline('%s = 0' % iteration_indicator)
    self.outdent()
    if node.else_:
        self.writeline('if %s:' % iteration_indicator)
        self.indent()
        self.blockvisit(node.else_, loop_frame)
        self.outdent()
    # reset the aliases if there are any.
    if not node.recursive:
        self.pop_scope(aliases, loop_frame)
    # if the node was recursive we have to return the buffer contents
    # and start the iteration code
    if node.recursive:
        self.return_buffer_contents(loop_frame)
        self.outdent()
        self.start_write(frame, node)
        self.write('loop(')
        self.visit(node.iter, frame)
        self.write(', loop)')
        self.end_write(frame)
def visit_If(self, node, frame):
    """Emit a plain ``if``/``else`` statement.

    The frame is only softened (no new scope is opened for an if).
    """
    soft_frame = frame.soft()
    self.writeline('if ', node)
    self.visit(node.test, soft_frame)
    self.write(':')
    self.indent()
    self.blockvisit(node.body, soft_frame)
    self.outdent()
    if not node.else_:
        return
    self.writeline('else:')
    self.indent()
    self.blockvisit(node.else_, soft_frame)
    self.outdent()
def visit_Macro(self, node, frame):
    """Compile ``{% macro %}``: body via macro_body, then bind the Macro
    runtime object to ``l_<name>`` (and export it at toplevel).
    """
    macro_frame = self.macro_body(node, frame)
    self.newline()
    if frame.toplevel:
        # underscore-prefixed macros stay private
        if not node.name.startswith('_'):
            self.write('context.exported_vars.add(%r)' % node.name)
        self.writeline('context.vars[%r] = ' % node.name)
    self.write('l_%s = ' % node.name)
    self.macro_def(node, macro_frame)
    frame.assigned_names.add(node.name)
def visit_CallBlock(self, node, frame):
    """Compile ``{% call %}``: the block body becomes an anonymous macro
    bound to ``caller`` and forwarded into the call expression.
    """
    children = node.iter_child_nodes(exclude=('call',))
    call_frame = self.macro_body(node, frame, children)
    self.writeline('caller = ')
    self.macro_def(node, call_frame)
    self.start_write(frame, node)
    self.visit_Call(node.call, call_frame, forward_caller=True)
    self.end_write(frame)
def visit_FilterBlock(self, node, frame):
    """Compile ``{% filter %}``: buffer the body in its own scope, then
    run the filter over the buffered output.
    """
    filter_frame = frame.inner()
    filter_frame.inspect(node.iter_child_nodes())
    aliases = self.push_scope(filter_frame)
    self.pull_locals(filter_frame)
    self.buffer(filter_frame)
    self.blockvisit(node.body, filter_frame)
    self.start_write(frame, node)
    # node.filter has node.node None, so visit_Filter reads the buffer
    self.visit_Filter(node.filter, filter_frame)
    self.end_write(frame)
    self.pop_scope(aliases, filter_frame)
def visit_ExprStmt(self, node, frame):
    """Emit a ``{% do %}``-style statement: evaluated, result discarded."""
    inner = node.node
    self.newline(node)
    self.visit(inner, frame)
def visit_Output(self, node, frame):
    """Compile an output node: constant-fold what we can at compile time,
    then either yield/append the pieces or build one big format string.
    """
    # if we have a known extends statement, we don't output anything
    # if we are in a require_output_check section
    if self.has_known_extends and frame.require_output_check:
        return
    if self.environment.finalize:
        finalize = lambda x: text_type(self.environment.finalize(x))
    else:
        finalize = text_type
    # if we are inside a frame that requires output checking, we do so
    outdent_later = False
    if frame.require_output_check:
        self.writeline('if parent_template is None:')
        self.indent()
        outdent_later = True
    # try to evaluate as many chunks as possible into a static
    # string at compile time.
    body = []
    for child in node.nodes:
        try:
            const = child.as_const(frame.eval_ctx)
        except nodes.Impossible:
            body.append(child)
            continue
        # the frame can't be volatile here, because otherwise the
        # as_const() function would raise an Impossible exception
        # at that point.
        try:
            if frame.eval_ctx.autoescape:
                if hasattr(const, '__html__'):
                    const = const.__html__()
                else:
                    const = escape(const)
            const = finalize(const)
        except Exception:
            # if something goes wrong here we evaluate the node
            # at runtime for easier debugging
            body.append(child)
            continue
        # adjacent constants are merged into one list entry
        if body and isinstance(body[-1], list):
            body[-1].append(const)
        else:
            body.append([const])
    # if we have less than 3 nodes or a buffer we yield or extend/append
    if len(body) < 3 or frame.buffer is not None:
        if frame.buffer is not None:
            # for one item we append, for more we extend
            if len(body) == 1:
                self.writeline('%s.append(' % frame.buffer)
            else:
                self.writeline('%s.extend((' % frame.buffer)
            self.indent()
        for item in body:
            if isinstance(item, list):
                val = repr(concat(item))
                if frame.buffer is None:
                    self.writeline('yield ' + val)
                else:
                    self.writeline(val + ', ')
            else:
                if frame.buffer is None:
                    self.writeline('yield ', item)
                else:
                    self.newline(item)
                close = 1
                if frame.eval_ctx.volatile:
                    self.write('(context.eval_ctx.autoescape and'
                               ' escape or to_string)(')
                elif frame.eval_ctx.autoescape:
                    self.write('escape(')
                else:
                    self.write('to_string(')
                if self.environment.finalize is not None:
                    self.write('environment.finalize(')
                    close += 1
                self.visit(item, frame)
                self.write(')' * close)
                if frame.buffer is not None:
                    self.write(', ')
        if frame.buffer is not None:
            # close the open parentheses
            self.outdent()
            self.writeline(len(body) == 1 and ')' or '))')
    # otherwise we create a format string as this is faster in that case
    else:
        format = []
        arguments = []
        for item in body:
            if isinstance(item, list):
                format.append(concat(item).replace('%', '%%'))
            else:
                format.append('%s')
                arguments.append(item)
        self.writeline('yield ')
        self.write(repr(concat(format)) + ' % (')
        idx = -1
        self.indent()
        for argument in arguments:
            self.newline(argument)
            close = 0
            if frame.eval_ctx.volatile:
                self.write('(context.eval_ctx.autoescape and'
                           ' escape or to_string)(')
                close += 1
            elif frame.eval_ctx.autoescape:
                self.write('escape(')
                close += 1
            if self.environment.finalize is not None:
                self.write('environment.finalize(')
                close += 1
            self.visit(argument, frame)
            self.write(')' * close + ', ')
        self.outdent()
        self.writeline(')')
    if outdent_later:
        self.outdent()
def visit_Assign(self, node, frame):
    """Compile ``{% set %}``; toplevel assignments are mirrored into
    context.vars and (for public names) context.exported_vars.
    """
    self.newline(node)
    # toplevel assignments however go into the local namespace and
    # the current template's context. We create a copy of the frame
    # here and add a set so that the Name visitor can add the assigned
    # names here.
    if frame.toplevel:
        assignment_frame = frame.copy()
        assignment_frame.toplevel_assignments = set()
    else:
        assignment_frame = frame
    self.visit(node.target, assignment_frame)
    self.write(' = ')
    self.visit(node.node, frame)
    # make sure toplevel assignments are added to the context.
    if frame.toplevel:
        public_names = [x for x in assignment_frame.toplevel_assignments
                        if not x.startswith('_')]
        if len(assignment_frame.toplevel_assignments) == 1:
            name = next(iter(assignment_frame.toplevel_assignments))
            self.writeline('context.vars[%r] = l_%s' % (name, name))
        else:
            self.writeline('context.vars.update({')
            for idx, name in enumerate(assignment_frame.toplevel_assignments):
                if idx:
                    self.write(', ')
                self.write('%r: l_%s' % (name, name))
            self.write('})')
        if public_names:
            if len(public_names) == 1:
                self.writeline('context.exported_vars.add(%r)' %
                               public_names[0])
            else:
                self.writeline('context.exported_vars.update((%s))' %
                               ', '.join(imap(repr, public_names)))
# -- Expression Visitors
def visit_Name(self, node, frame):
    """Write the ``l_``-prefixed local for a name reference.

    Toplevel stores are additionally recorded on the frame so that
    visit_Assign can mirror them into the context afterwards.
    """
    name = node.name
    if node.ctx == 'store' and frame.toplevel:
        frame.toplevel_assignments.add(name)
    self.write('l_' + name)
    frame.assigned_names.add(name)
def visit_Const(self, node, frame):
    """Write a literal constant; floats go through str(), everything
    else through repr().
    """
    value = node.value
    rendered = str(value) if isinstance(value, float) else repr(value)
    self.write(rendered)
def visit_TemplateData(self, node, frame):
    """Write literal template data.

    Constant-foldable data is embedded as a plain repr; otherwise the
    autoescape decision is deferred to runtime via Markup/identity.
    """
    try:
        self.write(repr(node.as_const(frame.eval_ctx)))
    except nodes.Impossible:
        self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
                   % node.data)
def visit_Tuple(self, node, frame):
    """Write a tuple literal; a single element gets the trailing comma."""
    self.write('(')
    count = 0
    for count, item in enumerate(node.items, 1):
        if count > 1:
            self.write(', ')
        self.visit(item, frame)
    self.write(',)' if count == 1 else ')')
def visit_List(self, node, frame):
    """Write a list literal with comma-separated items."""
    self.write('[')
    first = True
    for item in node.items:
        if not first:
            self.write(', ')
        first = False
        self.visit(item, frame)
    self.write(']')
def visit_Dict(self, node, frame):
    """Write a dict literal; each item carries a key and a value node."""
    self.write('{')
    first = True
    for pair in node.items:
        if not first:
            self.write(', ')
        first = False
        self.visit(pair.key, frame)
        self.write(': ')
        self.visit(pair.value, frame)
    self.write('}')
def binop(operator, interceptable=True):
    """Build a visitor that writes ``(left <op> right)``.

    In a sandboxed environment, operators listed in
    environment.intercepted_binops compile to environment.call_binop
    calls instead so the sandbox can veto them at runtime.
    """
    def visitor(self, node, frame):
        intercepted = (self.environment.sandboxed and
                       operator in self.environment.intercepted_binops)
        if intercepted:
            self.write('environment.call_binop(context, %r, ' % operator)
            self.visit(node.left, frame)
            self.write(', ')
            self.visit(node.right, frame)
        else:
            self.write('(')
            self.visit(node.left, frame)
            self.write(' %s ' % operator)
            self.visit(node.right, frame)
        self.write(')')
    return visitor
def uaop(operator, interceptable=True):
    """Build a visitor that writes ``(<op>operand)``.

    Mirrors binop: sandboxed environments may intercept the operator
    via environment.call_unop.
    """
    def visitor(self, node, frame):
        intercepted = (self.environment.sandboxed and
                       operator in self.environment.intercepted_unops)
        if intercepted:
            self.write('environment.call_unop(context, %r, ' % operator)
            self.visit(node.node, frame)
        else:
            self.write('(' + operator)
            self.visit(node.node, frame)
        self.write(')')
    return visitor
# Concrete arithmetic/boolean operator visitors, generated from the
# binop/uaop factories above.
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
# 'and'/'or'/'not' short-circuit in Python, so they are never intercepted
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
# the factories are only needed while the class body executes
del binop, uaop
def visit_Concat(self, node, frame):
    """Write a concat (``~``) as a join call over the operand tuple.

    markup_join is used under autoescaping; volatile eval contexts
    defer the choice to runtime.
    """
    ctx = frame.eval_ctx
    if ctx.volatile:
        joiner = ('(context.eval_ctx.volatile and'
                  ' markup_join or unicode_join)')
    elif ctx.autoescape:
        joiner = 'markup_join'
    else:
        joiner = 'unicode_join'
    self.write('%s((' % joiner)
    for operand in node.nodes:
        self.visit(operand, frame)
        self.write(', ')
    self.write('))')
def visit_Compare(self, node, frame):
    """Write a (possibly chained) comparison: the left expression, then
    every operator/operand pair in order.
    """
    self.visit(node.expr, frame)
    for operand in node.ops:
        self.visit(operand, frame)
def visit_Operand(self, node, frame):
    # One comparison operator plus its right-hand expression; the op
    # symbol comes from the module-level `operators` mapping.
    self.write(' %s ' % operators[node.op])
    self.visit(node.expr, frame)
def visit_Getattr(self, node, frame):
    """Route attribute access through ``environment.getattr`` so the
    environment controls undefined/sandbox behavior.
    """
    target, attr = node.node, node.attr
    self.write('environment.getattr(')
    self.visit(target, frame)
    self.write(', %r)' % attr)
def visit_Getitem(self, node, frame):
    """Write subscript access.

    Slices compile to native ``[...]`` syntax (they bypass the
    environment); everything else goes through ``environment.getitem``.
    """
    if isinstance(node.arg, nodes.Slice):
        self.visit(node.node, frame)
        self.write('[')
        self.visit(node.arg, frame)
        self.write(']')
        return
    self.write('environment.getitem(')
    self.visit(node.node, frame)
    self.write(', ')
    self.visit(node.arg, frame)
    self.write(')')
def visit_Slice(self, node, frame):
    """Write ``start:stop[:step]``, omitting whichever parts are absent."""
    start, stop, step = node.start, node.stop, node.step
    if start is not None:
        self.visit(start, frame)
    self.write(':')
    if stop is not None:
        self.visit(stop, frame)
    if step is not None:
        self.write(':')
        self.visit(step, frame)
def visit_Filter(self, node, frame):
    """Write a filter application.

    Depending on the filter's decoration (contextfilter /
    evalcontextfilter / environmentfilter) an extra first argument is
    injected. With node.node None we are inside a filter block and the
    buffered output is filtered instead of an expression.
    """
    self.write(self.filters[node.name] + '(')
    func = self.environment.filters.get(node.name)
    if func is None:
        self.fail('no filter named %r' % node.name, node.lineno)
    if getattr(func, 'contextfilter', False):
        self.write('context, ')
    elif getattr(func, 'evalcontextfilter', False):
        self.write('context.eval_ctx, ')
    elif getattr(func, 'environmentfilter', False):
        self.write('environment, ')
    # if the filter node is None we are inside a filter block
    # and want to write to the current buffer
    if node.node is not None:
        self.visit(node.node, frame)
    elif frame.eval_ctx.volatile:
        self.write('(context.eval_ctx.autoescape and'
                   ' Markup(concat(%s)) or concat(%s))' %
                   (frame.buffer, frame.buffer))
    elif frame.eval_ctx.autoescape:
        self.write('Markup(concat(%s))' % frame.buffer)
    else:
        self.write('concat(%s)' % frame.buffer)
    self.signature(node, frame)
    self.write(')')
def visit_Test(self, node, frame):
    """Write a test application (``x is odd`` etc.).

    Unknown test names are a compile-time failure.
    """
    self.write(self.tests[node.name] + '(')
    if node.name not in self.environment.tests:
        self.fail('no test named %r' % node.name, node.lineno)
    self.visit(node.node, frame)
    self.signature(node, frame)
    self.write(')')
def visit_CondExpr(self, node, frame):
    """Compile an inline ``a if cond else b`` expression.

    A missing else branch compiles to an environment.undefined object so
    that using the result raises a helpful error at runtime.
    """
    def write_expr2():
        if node.expr2 is not None:
            return self.visit(node.expr2, frame)
        self.write('environment.undefined(%r)' % ('the inline if-'
                   'expression on %s evaluated to false and '
                   'no else section was defined.' % self.position(node)))
    self.write('(')
    self.visit(node.expr1, frame)
    self.write(' if ')
    self.visit(node.test, frame)
    self.write(' else ')
    write_expr2()
    self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
    """Write a call expression.

    Sandboxed environments route through ``environment.call`` so the
    sandbox can inspect the callee; ``forward_caller`` injects the
    generated ``caller`` macro as a keyword (used by call blocks).
    """
    if self.environment.sandboxed:
        self.write('environment.call(context, ')
    else:
        self.write('context.call(')
    self.visit(node.node, frame)
    extra_kwargs = {'caller': 'caller'} if forward_caller else None
    self.signature(node, frame, extra_kwargs)
    self.write(')')
def visit_Keyword(self, node, frame):
    """Write a ``key=value`` keyword argument."""
    self.write('%s=' % node.key)
    self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
    """Wrap the expression in ``Markup()`` — unconditionally safe."""
    inner = node.expr
    self.write('Markup(')
    self.visit(inner, frame)
    self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
    """Mark the expression safe only when autoescaping is active at
    runtime (identity otherwise).
    """
    inner = node.expr
    self.write('(context.eval_ctx.autoescape and Markup or identity)(')
    self.visit(inner, frame)
    self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
    """Read an attribute straight off the environment object."""
    self.write('environment.%s' % node.name)
def visit_ExtensionAttribute(self, node, frame):
    """Read an attribute off an extension, looked up by identifier."""
    lookup = 'environment.extensions[%r]' % node.identifier
    self.write('%s.%s' % (lookup, node.name))
def visit_ImportedName(self, node, frame):
    """Write the temporary alias assigned to this import by visit_Template."""
    alias = self.import_aliases[node.importname]
    self.write(alias)
def visit_InternalName(self, node, frame):
    """Write a compiler-internal name verbatim (no ``l_`` prefix)."""
    self.write(node.name)
def visit_ContextReference(self, node, frame):
    """Write a reference to the runtime context object."""
    self.write('context')
def visit_Continue(self, node, frame):
    """Emit a ``continue`` statement (loop-controls extension)."""
    self.writeline('continue', node)
def visit_Break(self, node, frame):
    """Emit a ``break`` statement (loop-controls extension)."""
    self.writeline('break', node)
def visit_Scope(self, node, frame):
    """Compile a nested variable scope: push aliases, emit the body,
    then pop the scope again (push/pop must stay paired).
    """
    scope_frame = frame.inner()
    scope_frame.inspect(node.iter_child_nodes())
    aliases = self.push_scope(scope_frame)
    self.pull_locals(scope_frame)
    self.blockvisit(node.body, scope_frame)
    self.pop_scope(aliases, scope_frame)
def visit_EvalContextModifier(self, node, frame):
    """Compile ``{% set %}``-style eval-context changes (e.g. autoescape).

    Emits the runtime assignment and mirrors constant values onto the
    compile-time eval context; non-constant values make it volatile.
    """
    for keyword in node.options:
        self.writeline('context.eval_ctx.%s = ' % keyword.key)
        self.visit(keyword.value, frame)
        try:
            val = keyword.value.as_const(frame.eval_ctx)
        except nodes.Impossible:
            frame.eval_ctx.volatile = True
        else:
            setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
    """Like visit_EvalContextModifier, but save/restore the eval context
    around the body — both at runtime and at compile time.
    """
    old_ctx_name = self.temporary_identifier()
    safed_ctx = frame.eval_ctx.save()
    self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
    self.visit_EvalContextModifier(node, frame)
    for child in node.body:
        self.visit(child, frame)
    frame.eval_ctx.revert(safed_ctx)
    self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| apache-2.0 |
dekatzenel/team-k | mds/api/v1/urls.py | 3 | 3898 | """The Django url mappings for 1.x mds.
:Authors: Sana dev team
:Version: 1.1
"""
from django.conf.urls.defaults import patterns, url
# URL routes for the v1 API; handlers live in sana.mrs.views (HTML) and
# sana.mrs.json (JSON endpoints).
urlpatterns = patterns(
    '',
    # Old home for for connection check
    # url(r'^$',
    # 'sana.mrs.views.home',
    # name="sana-home"),
    # -- notification endpoints --
    url(r'^notifications/$',
        'sana.mrs.views.list_notifications',
        name="sana-list-notifications"),
    url(r'^notifications/submit/$',
        'sana.mrs.json.notification_submit',
        name="sana-api-notification-submit"),
    url(r'^notifications/submit/email/$',
        'sana.mrs.json.email_notification_submit',
        name="sana-api-email-notification-submit"),
    # -- patient lookup --
    url(r'^json/patient/list/$',
        'sana.mrs.json.patient_list',
        name="sana-json-patient-list"),
    url(r'^json/patient/(?P<id>[0-9-]+)/$',
        'sana.mrs.json.patient_get',
        name="sana-json-patient-get"),
    url(r'^json/validate/credentials/$',
        'sana.mrs.json.validate_credentials',
        name = "sana-json-validate-credentials"),
    # -- procedure / binary upload endpoints --
    url(r'^procedure/submit/$',
        'sana.mrs.views.procedure_submit',
        name="sana-html-procedure-submit"),
    url(r'^json/procedure/submit/$',
        'sana.mrs.json.procedure_submit',
        name="sana-json-procedure-submit"),
    url(r'^json/binary/submit/$',
        'sana.mrs.json.binary_submit',
        name="sana-json-binary-submit"),
    url(r'^json/binarychunk/submit/$',
        'sana.mrs.json.binarychunk_submit',
        name="sana-json-binarychunk-submit"),
    url(r'^json/textchunk/submit/$',
        'sana.mrs.json.binarychunk_hack_submit',
        name="sana-json-binarychunk-hack-submit"),
    url(r'^json/eventlog/submit/$',
        'sana.mrs.json.eventlog_submit',
        name="sana-json-eventlog-submit"),
    )
# These were never really used in v1 but leaving as commented so that
# we at least have a record of them
# url(r'^json/notifications/$',
# 'sana.mrs.json.notification_list',
# name="sana-json-notification-list"),
# url(r'^json/procedure/list/$',
# 'sana.mrs.json.procedure_list',
# name="sana-json-procedure-list"),
# url(r'^json/procedure/(?P<id>\d+)/$',
# 'sana.mrs.json.procedure_get',
# name="sana-json-procedure-get"),
# ----------------------- Manila Branch ------------------------------
# Originally developed for saved procedure syncing
#v1manila_patterns = patterns(
# '',
# url(r'^json/saved_procedure/(?P<id>[0-9-]+)/$',
# 'sana.mrs.json.saved_procedure_get',
# name="sana-json-saved-procedure-get"),
# url(r'^json/sync_encounters/(?P<patient_id>[0-9-]+)/$',
# 'sana.mrs.json.syc_encounters',
# name="sana-json-encounters-sync"),
# url(r'^json/sync_encounters/(?P<patient_id>[0-9-]+)/(?P<encounters>[0-9:-]+)$',
# 'sana.mrs.json.syc_encounters',
# name="sana-json-encounters-sync"),
# )
# ---------------------------- LOGGING ---------------------------------
# Orginal logging patterns, leaving commented out for reference
#from sana.mrs.models import RequestLog
#log_list = {
# 'queryset': RequestLog.objects.all().order_by('-timestamp'),
# 'paginate_by': 100,
# }
#log_item = {
# 'queryset': RequestLog.objects.all(),
# }
#log_detail = {
# 'queryset': RequestLog.objects.all(),
# }
#v1log_patterns = patterns(
# '',
# url(r'^log-detail/$',
# 'sana.mrs.util.log_json_detail',
# name="log-json-detail-noarg"),
#
# url(r'^log-detail/(?P<log_id>\d+)$',
# 'sana.mrs.util.log_json_detail',
# name="log-json-detail"),
#
# url(r'^log/$',
# 'django.views.generic.list_detail.object_list',
# log_list,
# name="log-view"),
# url(r'^log/(?P<object_id>\d+)$',
# 'django.views.generic.list_detail.object_detail',
# log_item,
# name="log-item-view"),
#)
| bsd-3-clause |
thumbimigwe/echorizr | lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/io.py | 309 | 9480 | import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
    # Opaque handle mirroring GEOS's WKTReader; no fields are ever read
    # from Python — the struct only gives the pointer type below a name.
    pass
class WKTWriter_st(Structure):
    # Opaque handle mirroring GEOS's WKTWriter.
    pass
class WKBReader_st(Structure):
    # Opaque handle mirroring GEOS's WKBReader.
    pass
class WKBWriter_st(Structure):
    # Opaque handle mirroring GEOS's WKBWriter.
    pass
# Pointer types for the opaque reader/writer handles above.
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
# Fixed copy-paste typo: this previously pointed at WKBReader_st.
# Harmless in practice (both structs are empty and opaque to Python),
# but the writer pointer should name the writer struct.
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
# errcheck validates the returned C string (and frees the GEOS buffer)
wkt_writer_write = GEOSFuncFactory(
    'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
class WKTOutputDim(GEOSFuncFactory):
    # Factory for the WKT writer output-dimension getter/setter, with a
    # no-op Python fallback for GEOS builds that predate these functions.
    def get_func(self, *args, **kwargs):
        try:
            return super(WKTOutputDim, self).get_func(*args, **kwargs)
        except AttributeError:
            # GEOSWKTWriter_get/setOutputDimension has been introduced in GEOS 3.3.0
            # Always return 2 if not available
            return {
                'GEOSWKTWriter_getOutputDimension': lambda ptr: 2,
                'GEOSWKTWriter_setOutputDimension': lambda ptr, dim: None,
            }.get(self.func_name)
# Output-dimension accessors (may be Python no-op fallbacks, see above).
wkt_writer_get_outdim = WKTOutputDim(
    'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = WKTOutputDim(
    'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
    # Although the function definitions take `const unsigned char *`
    # as their parameter, we use c_char_p here so the function may
    # take Python strings directly as parameters. Inside Python there
    # is not a difference between signed and unsigned characters, so
    # it is not a problem.
    argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)
# Binary (WKB) and hex-encoded (HEX) geometry readers.
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
    # GEOS fills in the size_t out-parameter with the length of the
    # returned buffer; check_sized_string validates the (buffer, size)
    # pair instead of assuming NUL-termination.
    argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
    restype = c_uchar_p
    errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
    argtypes = [WKB_WRITE_PTR]
    restype = c_int
class WKBWriterSet(GEOSFuncFactory):
    argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
# The include-SRID flag is exchanged as a single C char (b'\x00'/b'\x01'),
# not an int, so the default restype/argtypes are overridden here.
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
    "Base class for GEOS I/O objects."
    def __init__(self):
        # Getting the pointer with the constructor.
        self.ptr = self._constructor()
        # Loading the real destructor function at this point as doing it in
        # __del__ is too late (import error).
        self._destructor.func = self._destructor.get_func(
            *self._destructor.args, **self._destructor.kwargs
        )
    def __del__(self):
        # Cleaning up with the appropriate destructor.
        # NOTE(review): reads the underscored attribute directly —
        # presumably `_ptr` is the backing slot of the GEOSBase `ptr`
        # property; confirm against base.py.
        if self._ptr:
            self._destructor(self._ptr)
# ### Base WKB/WKT Reading and Writing objects ###
# These reader classes are internal-only: their `read` methods hand back
# raw C geometry _pointers_, not GEOSGeometry instances.
class _WKTReader(IOBase):
    _constructor = wkt_reader_create
    _destructor = wkt_reader_destroy
    ptr_type = WKT_READ_PTR
    def read(self, wkt):
        """Return a pointer to a C GEOS geometry parsed from WKT text."""
        if isinstance(wkt, (bytes, six.string_types)):
            return wkt_reader_read(self.ptr, force_bytes(wkt))
        raise TypeError
class _WKBReader(IOBase):
    _constructor = wkb_reader_create
    _destructor = wkb_reader_destroy
    ptr_type = WKB_READ_PTR
    def read(self, wkb):
        "Returns a _pointer_ to C GEOS Geometry object from the given WKB."
        # Buffers are treated as raw binary WKB; strings/bytes are
        # assumed to be hex-encoded.
        if isinstance(wkb, six.memoryview):
            raw = bytes(wkb)
            return wkb_reader_read(self.ptr, raw, len(raw))
        if not isinstance(wkb, (bytes, six.string_types)):
            raise TypeError
        return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
    _constructor = wkt_writer_create
    _destructor = wkt_writer_destroy
    ptr_type = WKT_WRITE_PTR
    def write(self, geom):
        """Return the WKT representation of the given geometry."""
        return wkt_writer_write(self.ptr, geom.ptr)
    @property
    def outdim(self):
        """Output dimension (2 or 3) used when serializing to WKT."""
        return wkt_writer_get_outdim(self.ptr)
    @outdim.setter
    def outdim(self, new_dim):
        if new_dim != 2 and new_dim != 3:
            raise ValueError('WKT output dimension must be 2 or 3')
        wkt_writer_set_outdim(self.ptr, new_dim)
class WKBWriter(IOBase):
    _constructor = wkb_writer_create
    _destructor = wkb_writer_destroy
    ptr_type = WKB_WRITE_PTR
    def write(self, geom):
        "Returns the WKB representation of the given geometry."
        # The size_t out-parameter receives the buffer length; the result
        # is wrapped in a memoryview for zero-copy access.
        return six.memoryview(wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))
    def write_hex(self, geom):
        "Returns the HEXEWKB representation of the given geometry."
        return wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
    # ### WKBWriter Properties ###
    # Property for getting/setting the byteorder.
    def _get_byteorder(self):
        return wkb_writer_get_byteorder(self.ptr)
    def _set_byteorder(self, order):
        # 0 = Big Endian (XDR), 1 = Little Endian (NDR).
        if order not in (0, 1):
            raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
        wkb_writer_set_byteorder(self.ptr, order)
    byteorder = property(_get_byteorder, _set_byteorder)
    # Property for getting/setting the output dimension.
    def _get_outdim(self):
        return wkb_writer_get_outdim(self.ptr)
    def _set_outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKB output dimension must be 2 or 3')
        wkb_writer_set_outdim(self.ptr, new_dim)
    outdim = property(_get_outdim, _set_outdim)
    # Property for getting/setting the include srid flag.
    def _get_include_srid(self):
        # GEOS returns the flag as a one-byte char; ord() maps it to 0/1.
        return bool(ord(wkb_writer_get_include_srid(self.ptr)))
    def _set_include_srid(self, include):
        # The setter expects a one-byte char, not an int (see the
        # prototype overrides above).
        if include:
            flag = b'\x01'
        else:
            flag = b'\x00'
        wkb_writer_set_include_srid(self.ptr, flag)
    srid = property(_get_include_srid, _set_include_srid)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
    # Each slot starts as None and is populated lazily by the accessor
    # functions below; every thread gets its own independent set.
    wkt_r = None
    wkt_w = None
    wkb_r = None
    wkb_w = None
    ewkb_w = None
thread_context = ThreadLocalIO()
# Module-level accessors returning the I/O object local to the calling
# thread, creating it lazily on first use. Writer accessors re-apply the
# requested output dimension on every call.
def wkt_r():
    reader = thread_context.wkt_r
    if not reader:
        reader = _WKTReader()
        thread_context.wkt_r = reader
    return reader
def wkt_w(dim=2):
    writer = thread_context.wkt_w
    if not writer:
        writer = WKTWriter()
        thread_context.wkt_w = writer
    writer.outdim = dim
    return writer
def wkb_r():
    reader = thread_context.wkb_r
    if not reader:
        reader = _WKBReader()
        thread_context.wkb_r = reader
    return reader
def wkb_w(dim=2):
    writer = thread_context.wkb_w
    if not writer:
        writer = WKBWriter()
        thread_context.wkb_w = writer
    writer.outdim = dim
    return writer
def ewkb_w(dim=2):
    # Like wkb_w, but with SRID embedding enabled (EWKB).
    writer = thread_context.ewkb_w
    if not writer:
        writer = WKBWriter()
        writer.srid = True
        thread_context.ewkb_w = writer
    writer.outdim = dim
    return writer
| mit |
ccccccccccc/mitmproxy | test/test_filt.py | 14 | 7900 | import cStringIO
from libmproxy import filt, flow
from libmproxy.protocol import http
from libmproxy.models import Error
from netlib.http import Headers
import tutils
class TestParsing:
    """Tests for filt.parse: building filter expression trees from text."""
    def _dump(self, x):
        # Sanity check: a parsed expression must be able to render itself.
        c = cStringIO.StringIO()
        x.dump(fp=c)
        assert c.getvalue()
    def test_parse_err(self):
        assert filt.parse("~h [") is None
    def test_simple(self):
        assert not filt.parse("~b")
        assert filt.parse("~q")
        assert filt.parse("~c 10")
        assert filt.parse("~m foobar")
        assert filt.parse("~u foobar")
        assert filt.parse("~q ~c 10")
        p = filt.parse("~q ~c 10")
        self._dump(p)
        assert len(p.lst) == 2
    def test_naked_url(self):
        a = filt.parse("foobar ~h rex")
        assert a.lst[0].expr == "foobar"
        assert a.lst[1].expr == "rex"
        self._dump(a)
    def test_quoting(self):
        a = filt.parse("~u 'foo ~u bar' ~u voing")
        assert a.lst[0].expr == "foo ~u bar"
        assert a.lst[1].expr == "voing"
        self._dump(a)
        a = filt.parse("~u foobar")
        assert a.expr == "foobar"
        a = filt.parse(r"~u 'foobar\"\''")
        assert a.expr == "foobar\"'"
        a = filt.parse(r'~u "foo \'bar"')
        assert a.expr == "foo 'bar"
    def test_nesting(self):
        a = filt.parse("(~u foobar & ~h voing)")
        assert a.lst[0].expr == "foobar"
        self._dump(a)
    def test_not(self):
        a = filt.parse("!~h test")
        assert a.itm.expr == "test"
        a = filt.parse("!(~u test & ~h bar)")
        assert a.itm.lst[0].expr == "test"
        self._dump(a)
    def test_binaryops(self):
        # Fixed: these isinstance() checks were missing `assert`, so the
        # results were silently discarded and never verified.
        a = filt.parse("~u foobar | ~h voing")
        assert isinstance(a, filt.FOr)
        self._dump(a)
        a = filt.parse("~u foobar & ~h voing")
        assert isinstance(a, filt.FAnd)
        self._dump(a)
    def test_wideops(self):
        a = filt.parse("~hq 'header: qvalue'")
        assert isinstance(a, filt.FHeadRequest)
        self._dump(a)
class TestMatching:
    """Tests applying parsed filter expressions to request/response/error
    flows. The helpers below build minimal fixture flows."""
    def req(self):
        # A flow with only a request (header "header: qvalue",
        # body "content_request", GET http://host:80/path).
        headers = Headers(header="qvalue")
        req = http.HTTPRequest(
            "absolute",
            "GET",
            "http",
            "host",
            80,
            "/path",
            b"HTTP/1.1",
            headers,
            "content_request",
            None,
            None
        )
        f = http.HTTPFlow(tutils.tclient_conn(), None)
        f.request = req
        return f
    def resp(self):
        # The request flow plus a 200 response
        # (header "header_response: svalue", body "content_response").
        f = self.req()
        headers = Headers([["header_response", "svalue"]])
        f.response = http.HTTPResponse(
            b"HTTP/1.1",
            200,
            "OK",
            headers,
            "content_response",
            None,
            None)
        return f
    def err(self):
        # The request flow with an attached error.
        f = self.req()
        f.error = Error("msg")
        return f
    def q(self, q, o):
        # Parse the filter string `q` and apply it to flow `o`.
        return filt.parse(q)(o)
    def test_asset(self):
        s = self.resp()
        assert not self.q("~a", s)
        s.response.headers["content-type"] = "text/javascript"
        assert self.q("~a", s)
    def test_fcontenttype(self):
        q = self.req()
        s = self.resp()
        assert not self.q("~t content", q)
        assert not self.q("~t content", s)
        q.request.headers["content-type"] = "text/json"
        assert self.q("~t json", q)
        assert self.q("~tq json", q)
        assert not self.q("~ts json", q)
        s.response.headers["content-type"] = "text/json"
        assert self.q("~t json", s)
        del s.response.headers["content-type"]
        s.request.headers["content-type"] = "text/json"
        assert self.q("~t json", s)
        assert self.q("~tq json", s)
        assert not self.q("~ts json", s)
    def test_freq_fresp(self):
        q = self.req()
        s = self.resp()
        assert self.q("~q", q)
        assert not self.q("~q", s)
        assert not self.q("~s", q)
        assert self.q("~s", s)
    def test_ferr(self):
        e = self.err()
        assert self.q("~e", e)
    def test_head(self):
        # ~h matches either direction; ~hq/~hs restrict to request/response.
        q = self.req()
        s = self.resp()
        assert not self.q("~h nonexistent", q)
        assert self.q("~h qvalue", q)
        assert self.q("~h header", q)
        assert self.q("~h 'header: qvalue'", q)
        assert self.q("~h 'header: qvalue'", s)
        assert self.q("~h 'header_response: svalue'", s)
        assert self.q("~hq 'header: qvalue'", s)
        assert not self.q("~hq 'header_response: svalue'", s)
        assert self.q("~hq 'header: qvalue'", q)
        assert not self.q("~hq 'header_request: svalue'", q)
        assert not self.q("~hs 'header: qvalue'", s)
        assert self.q("~hs 'header_response: svalue'", s)
        assert not self.q("~hs 'header: qvalue'", q)
    def match_body(self, q, s):
        # Shared body assertions, run on both plain and gzipped flows.
        assert not self.q("~b nonexistent", q)
        assert self.q("~b content", q)
        assert self.q("~b response", s)
        assert self.q("~b content_request", s)
        assert self.q("~bq content", q)
        assert self.q("~bq content", s)
        assert not self.q("~bq response", q)
        assert not self.q("~bq response", s)
        assert not self.q("~bs content", q)
        assert self.q("~bs content", s)
        assert not self.q("~bs nomatch", s)
        assert not self.q("~bs response", q)
        assert self.q("~bs response", s)
    def test_body(self):
        q = self.req()
        s = self.resp()
        self.match_body(q, s)
        # Body filters must still match after content encoding.
        q.request.encode("gzip")
        s.request.encode("gzip")
        s.response.encode("gzip")
        self.match_body(q, s)
    def test_method(self):
        q = self.req()
        s = self.resp()
        assert self.q("~m get", q)
        assert not self.q("~m post", q)
        q.request.method = "oink"
        assert not self.q("~m get", q)
    def test_domain(self):
        q = self.req()
        s = self.resp()
        assert self.q("~d host", q)
        assert not self.q("~d none", q)
    def test_url(self):
        q = self.req()
        s = self.resp()
        assert self.q("~u host", q)
        assert self.q("~u host/path", q)
        assert not self.q("~u moo/path", q)
        assert self.q("~u host", s)
        assert self.q("~u host/path", s)
        assert not self.q("~u moo/path", s)
    def test_code(self):
        q = self.req()
        s = self.resp()
        assert not self.q("~c 200", q)
        assert self.q("~c 200", s)
        assert not self.q("~c 201", s)
    def test_src(self):
        q = self.req()
        assert self.q("~src address", q)
        assert not self.q("~src foobar", q)
        assert self.q("~src :22", q)
        assert not self.q("~src :99", q)
        assert self.q("~src address:22", q)
    def test_dst(self):
        q = self.req()
        q.server_conn = tutils.tserver_conn()
        assert self.q("~dst address", q)
        assert not self.q("~dst foobar", q)
        assert self.q("~dst :22", q)
        assert not self.q("~dst :99", q)
        assert self.q("~dst address:22", q)
    def test_and(self):
        s = self.resp()
        assert self.q("~c 200 & ~h head", s)
        assert self.q("~c 200 & ~h head", s)
        assert not self.q("~c 200 & ~h nohead", s)
        assert self.q("(~c 200 & ~h head) & ~b content", s)
        assert not self.q("(~c 200 & ~h head) & ~b nonexistent", s)
        assert not self.q("(~c 200 & ~h nohead) & ~b content", s)
    def test_or(self):
        s = self.resp()
        assert self.q("~c 200 | ~h nohead", s)
        assert self.q("~c 201 | ~h head", s)
        assert not self.q("~c 201 | ~h nohead", s)
        assert self.q("(~c 201 | ~h nohead) | ~s", s)
    def test_not(self):
        s = self.resp()
        assert not self.q("! ~c 200", s)
        assert self.q("! ~c 201", s)
        assert self.q("!~c 201 !~c 202", s)
        assert not self.q("!~c 201 !~c 200", s)
| mit |
whygee/oppia | core/domain/stats_domain.py | 15 | 3016 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain object for statistics models."""
__author__ = 'Sean Lip'
import copy
import operator
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
class StateRuleAnswerLog(object):
    """Domain object recording unresolved answers that matched a state rule.

    Nothing in this class may depend on the concrete storage model used.
    """
    def __init__(self, answers):
        # Log of unresolved answers that hit this rule: maps each answer
        # (encoded as an HTML string) to the number of times it was
        # entered. Deep-copied so callers cannot mutate our state.
        self.answers = copy.deepcopy(answers)
    @property
    def total_answer_count(self):
        """Total count of answers for this rule that have not been resolved."""
        # TODO(sll): Cache this computed property.
        return sum(self.answers.itervalues())
    @classmethod
    def get_multi(cls, exploration_id, rule_data):
        """Gets domain objects corresponding to the given rule data.

        Args:
            exploration_id: the exploration id
            rule_data: a list of dicts, each with the following keys:
                (state_name, rule_str).
        """
        # TODO(sll): Should each rule_str be unicode instead?
        log_models = (
            stats_models.StateRuleAnswerLogModel.get_or_create_multi(
                exploration_id, rule_data))
        return [cls(model.answers) for model in log_models]
    @classmethod
    def get(cls, exploration_id, state_name, rule_str):
        # TODO(sll): Deprecate this method.
        return cls.get_multi(exploration_id, [{
            'state_name': state_name,
            'rule_str': rule_str
        }])[0]
    def get_top_answers(self, N):
        """Returns the top N answers.

        Args:
            N: the maximum number of answers to return.

        Returns:
            A list of (answer, count) tuples for the N answers with the
            highest counts.
        """
        ranked = sorted(
            self.answers.iteritems(),
            key=operator.itemgetter(1), reverse=True)
        return ranked[:N]
| apache-2.0 |
PIVX-Project/PIVX | test/util/bitcoin-util-test.py | 6 | 6722 | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for pivx utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
    """Entry point: read config.ini, parse CLI flags, configure logging,
    then run the full bitcoin-util test suite."""
    cfg = configparser.ConfigParser()
    cfg.optionxform = str
    ini_path = os.path.join(os.path.dirname(__file__), "../config.ini")
    cfg.read_file(open(ini_path, encoding="utf8"))
    env_conf = dict(cfg.items('environment'))
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('-v', '--verbose', action='store_true')
    opts = arg_parser.parse_args()
    # Verbose mode logs every test result; otherwise only errors show.
    log_level = logging.DEBUG if opts.verbose else logging.ERROR
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        level=log_level)
    bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"),
             "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
    """Loads and parses the input file, runs all tests and reports results.

    Exits the process with status 1 if any test case failed, 0 otherwise.
    """
    input_filename = os.path.join(testDir, input_basename)
    raw_data = open(input_filename, encoding="utf8").read()
    input_data = json.loads(raw_data)
    failed_testcases = []
    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit abort the run instead of being recorded as a
            # test failure.
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])
    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
def bctest(testDir, testObj, buildenv):
    """Runs a single test, comparing output and RC to expected output and RC.

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by bctester() and reported.
    """
    # Get the exec names and arguments
    execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
    execargs = testObj['args']
    execrun = [execprog] + execargs
    # Read the input data (if there is any)
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = os.path.join(testDir, testObj["input"])
        inputData = open(filename, encoding="utf8").read()
        stdinCfg = subprocess.PIPE
    # Read the expected output data (if there is any)
    outputFn = None
    outputData = None
    outputType = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
        try:
            outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
        except:
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception
        if not outputType:
            logging.error("Output file %s does not have a file extension" % outputFn)
            raise Exception
    # Run the test
    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    try:
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise
    if outputData:
        data_mismatch, formatting_mismatch = False, False
        # Parse command output and expected output
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' % (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
            raise
        # Compare data
        if a_parsed != b_parsed:
            logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
            data_mismatch = True
        # Compare formatting
        # Even when the parsed data matches, the byte-for-byte text must
        # match too, so formatting regressions are caught.
        if outs[0] != outputData:
            error_message = "Output formatting mismatch for " + outputFn + ":\n"
            error_message += "".join(difflib.context_diff(outputData.splitlines(True),
                                                          outs[0].splitlines(True),
                                                          fromfile=outputFn,
                                                          tofile="returned"))
            logging.error(error_message)
            formatting_mismatch = True
        assert not data_mismatch and not formatting_mismatch
    # Compare the return code to the expected return code
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        logging.error("Return code mismatch for " + outputFn)
        raise Exception
    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Compare error text
        # TODO: ideally, we'd compare the strings exactly and also assert
        # That stderr is empty if no errors are expected. However, pivx-tx
        # emits DISPLAY errors when running as a windows application on
        # linux through wine. Just assert that the expected error text appears
        # somewhere in stderr.
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
            raise Exception
def parse_output(a, fmt):
    """Parse the output according to specified format.

    'json' yields the parsed Python object; 'hex' yields raw bytes.
    Raise an error if the output can't be parsed."""
    if fmt == 'json':
        # json: compare parsed data
        return json.loads(a)
    if fmt == 'hex':
        # hex: parse and compare binary data
        return binascii.a2b_hex(a.strip())
    raise NotImplementedError("Don't know how to compare %s" % fmt)
# Allow running the suite directly (it also runs under `make check`).
if __name__ == '__main__':
    main()
| mit |
mozilla/verbatim | vendor/lib/python/django/middleware/gzip.py | 98 | 1747 | import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
# Matches a "gzip" token in the Accept-Encoding header.
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
    """
    This middleware compresses content if the browser allows gzip compression.
    It sets the Vary header accordingly, so that caches will base their storage
    on the Accept-Encoding header.
    """
    def process_response(self, request, response):
        # It's not worth attempting to compress really short responses.
        if len(response.content) < 200:
            return response
        # Vary on Accept-Encoding even when we end up not compressing, so
        # caches never serve a gzipped body to a client that can't accept it.
        patch_vary_headers(response, ('Accept-Encoding',))
        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header('Content-Encoding'):
            return response
        # MSIE have issues with gzipped response of various content types.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            ctype = response.get('Content-Type', '').lower()
            if not ctype.startswith("text/") or "javascript" in ctype:
                return response
        # Only compress when the client advertises gzip support.
        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response
        # Return the compressed content only if it's actually shorter.
        compressed_content = compress_string(response.content)
        if len(compressed_content) >= len(response.content):
            return response
        # The compressed body invalidates the original strong ETag, so it
        # is tagged as a gzip variant.
        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response.content = compressed_content
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
| gpl-2.0 |
mazaclub/electrum-nmc | lib/qrscanner.py | 3 | 1278 | import os
from i18n import _
try:
import zbar
except ImportError:
zbar = None
proc = None
def scan_qr(config):
    """Open the system camera (via zbar) and block until a QR code is
    scanned, returning its decoded payload; returns "" if the user
    closes the preview window.

    The zbar processor is created once and cached in the module-level
    `proc` global, so later scans reuse the same camera handle.
    """
    global proc
    if not zbar:
        raise BaseException("\n".join([_("Cannot start QR scanner."),_("The zbar package is not available."),_("On Linux, try 'sudo pip install zbar'")]))
    if proc is None:
        # "default" (or empty) lets zbar pick the video device itself.
        device = config.get("video_device", "default")
        if device == 'default':
            device = ''
        proc = zbar.Processor()
        proc.init(video_device=device)
    proc.visible = True
    # Keep scanning until a QRCODE symbol is decoded or the user aborts.
    while True:
        try:
            proc.process_one()
        except Exception:
            # User closed the preview window
            return ""
        for r in proc.results:
            if str(r.type) != 'QRCODE':
                continue
            # hiding the preview window stops the camera
            proc.visible = False
            return r.data
def _find_system_cameras():
device_root = "/sys/class/video4linux"
devices = {} # Name -> device
if os.path.exists(device_root):
for device in os.listdir(device_root):
name = open(os.path.join(device_root, device, 'name')).read()
name = name.strip('\n')
devices[name] = os.path.join("/dev",device)
return devices
| gpl-3.0 |
michaelBenin/sqlalchemy | doc/build/conf.py | 1 | 10689 | # -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../lib'))
sys.path.insert(0, os.path.abspath('../..')) # examples
sys.path.insert(0, os.path.abspath('.'))
import sqlalchemy
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'builder.autodoc_mods',
'changelog',
'sphinx_paramlinks',
'builder.dialect_info',
'builder.mako',
'builder.sqlformatter',
'builder.viewsource',
]
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath('templates')]
nitpicky = True
# The suffix of source filenames.
source_suffix = '.rst'
# section names used by the changelog extension.
changelog_sections = ["general", "orm", "orm declarative", "orm querying", \
"orm configuration", "engine", "sql", \
"schema", \
"postgresql", "mysql", "sqlite", "mssql", \
"oracle", "firebird"]
# tags to sort on inside of sections
changelog_inner_tag_sort = ["feature", "bug", "moved", "changed", "removed"]
# how to render changelog links
changelog_render_ticket = "http://www.sqlalchemy.org/trac/ticket/%s"
changelog_render_pullreq = {
"bitbucket": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
"default": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
"github": "https://github.com/zzzeek/sqlalchemy/pull/%s",
}
changelog_render_changeset = "http://www.sqlalchemy.org/trac/changeset/%s"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'SQLAlchemy'
copyright = u'2007-2014, the SQLAlchemy authors and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0.0"
release_date = "Not released"
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = 3
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# have the "gettext" build generate .pot for each individual
# .rst
gettext_compact = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Sphinx build configuration fragment for the SQLAlchemy documentation.
# Fixes applied:
#  * ``ur'...'`` string prefixes are a SyntaxError on Python 3; plain raw
#    strings behave identically on Python 2 and 3.
#  * ``latex_preamble`` contained an invalid ``\s`` escape; use a raw string.
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%m/%d/%Y %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SQLAlchemydoc'
#autoclass_content = 'both'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('contents', 'sqlalchemy_%s.tex' % release.replace('.', '_'), r'SQLAlchemy Documentation',
     r'Mike Bayer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 3.
latex_preamble = r'\setcounter{tocdepth}{3}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
#latex_elements = {
#    'papersize': 'letterpaper',
#    'pointsize': '10pt',
#}
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sqlalchemy', u'SQLAlchemy Documentation',
     [u'SQLAlchemy authors'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'SQLAlchemy'
epub_author = u'SQLAlchemy authors'
epub_publisher = u'SQLAlchemy authors'
epub_copyright = u'2007-2014, SQLAlchemy authors'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
intersphinx_mapping = {
    'alembic': ('http://alembic.readthedocs.org/en/latest/', None),
    'psycopg2': ('http://pythonhosted.org/psycopg2', None),
}
| mit |
myzhan/bottle-doc-zh-cn | test/test_mdict.py | 50 | 2047 | import unittest
from bottle import MultiDict, HeaderDict
class TestMultiDict(unittest.TestCase):
    """Behavioral tests for bottle's MultiDict and HeaderDict."""

    def test_isadict(self):
        """A MultiDict must be usable as a drop-in plain dict."""
        plain = dict(a=5)
        multi = MultiDict(a=5)
        plain['key'] = 'value'
        multi['key'] = 'value'
        # Repeated assignment: the plain-dict view keeps only the last value.
        for value in ('v1', 'v2'):
            plain['k2'] = value
            multi['k2'] = value
        self.assertEqual(list(plain.keys()), list(multi.keys()))
        self.assertEqual(list(plain.values()), list(multi.values()))
        self.assertEqual(list(plain.keys()), list(multi.iterkeys()))
        self.assertEqual(list(plain.values()), list(multi.itervalues()))
        self.assertEqual(plain.get('key'), multi.get('key'))
        self.assertEqual(plain.get('cay'), multi.get('cay'))
        self.assertEqual(list(iter(plain)), list(iter(multi)))
        self.assertEqual([key for key in plain], [key for key in multi])
        self.assertEqual(len(plain), len(multi))
        self.assertEqual('key' in plain, 'key' in multi)
        self.assertEqual('cay' in plain, 'cay' in multi)
        self.assertRaises(KeyError, lambda: multi['cay'])

    def test_ismulti(self):
        """MultiDict remembers every value assigned to the same key."""
        multi = MultiDict(a=5)
        multi['a'] = 6
        self.assertEqual([5, 6], multi.getall('a'))
        self.assertEqual([], multi.getall('b'))
        self.assertEqual([('a', 5), ('a', 6)], list(multi.iterallitems()))

    def test_isheader(self):
        """HeaderDict replaces on assignment and Title-Cases its keys."""
        headers = HeaderDict(abc_def=5)
        headers['abc_def'] = 6  # plain assignment replaces the old value
        self.assertEqual(['6'], headers.getall('abc_def'))
        headers.append('abc_def', 7)  # append() keeps both values
        self.assertEqual(['6', '7'], headers.getall('abc_def'))
        self.assertEqual([('Abc-Def', '6'), ('Abc-Def', '7')],
                         list(headers.iterallitems()))

    def test_headergetbug(self):
        """HeaderDict.get() must be case insensitive."""
        headers = HeaderDict()
        headers['UPPER'] = 'UPPER'
        headers['lower'] = 'lower'
        self.assertEqual(headers.get('upper'), 'UPPER')
        self.assertEqual(headers.get('LOWER'), 'lower')
# Allow running this module directly as a test script.
if __name__ == '__main__': #pragma: no cover
    unittest.main()
| mit |
fenglu-g/incubator-airflow | tests/contrib/operators/test_dataproc_operator.py | 4 | 29835 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import re
import unittest
from airflow import DAG, AirflowException
from airflow.contrib.operators.dataproc_operator import \
DataprocClusterCreateOperator, \
DataprocClusterDeleteOperator, \
DataProcHadoopOperator, \
DataProcHiveOperator, \
DataProcPySparkOperator, \
DataProcSparkOperator, \
DataprocWorkflowTemplateInstantiateInlineOperator, \
DataprocWorkflowTemplateInstantiateOperator, \
DataprocClusterScaleOperator
from airflow.version import version
from copy import deepcopy
# Prefer the stdlib mock (Python 3.3+), then the standalone ``mock``
# backport, and finally fall back to None when neither is installed.
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None
# NOTE(review): the unconditional imports below defeat the fallback above --
# they raise ImportError whenever the standalone ``mock`` package is missing,
# even if ``unittest.mock`` was found. Verify before relying on ``mock = None``.
from mock import MagicMock, Mock
from mock import patch
# --- Shared test fixtures: cluster identity and topology ---
TASK_ID = 'test-dataproc-operator'
CLUSTER_NAME = 'test-cluster-name'
GCP_PROJECT_ID = 'test-project-id'
NUM_WORKERS = 123
GCE_ZONE = 'us-central1-a'
NETWORK_URI = '/projects/project_id/regions/global/net'
SUBNETWORK_URI = '/projects/project_id/regions/global/subnet'
INTERNAL_IP_ONLY = True
TAGS = ['tag1', 'tag2']
# --- Machine / disk configuration used by the create-cluster tests ---
STORAGE_BUCKET = 'gs://airflow-test-bucket/'
IMAGE_VERSION = '1.1'
CUSTOM_IMAGE = 'test-custom-image'
MASTER_MACHINE_TYPE = 'n1-standard-2'
MASTER_DISK_SIZE = 100
MASTER_DISK_TYPE = 'pd-standard'
WORKER_MACHINE_TYPE = 'n1-standard-2'
WORKER_DISK_SIZE = 100
WORKER_DISK_TYPE = 'pd-standard'
NUM_PREEMPTIBLE_WORKERS = 2
GET_INIT_ACTION_TIMEOUT = "600s"  # 10m
# Two label sets: empty, and populated (exercises label merging).
LABEL1 = {}
LABEL2 = {'application': 'test', 'year': 2017}
SERVICE_ACCOUNT_SCOPES = [
    'https://www.googleapis.com/auth/bigquery',
    'https://www.googleapis.com/auth/bigtable.data'
]
# --- Cluster lifecycle (TTL/auto-delete) fixtures ---
IDLE_DELETE_TTL = 321
AUTO_DELETE_TIME = datetime.datetime(2017, 6, 7)
AUTO_DELETE_TTL = 654
DEFAULT_DATE = datetime.datetime(2017, 6, 6)
GCP_REGION = 'test-region'
MAIN_URI = 'test-uri'
TEMPLATE_ID = 'template-id'
# Dotted path patched in the job-operator tests below.
HOOK = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
DATAPROC_JOB_ID = 'dataproc_job_id'
# Canonical job payload returned by the stubbed job-template builder.
DATAPROC_JOB_TO_SUBMIT = {
    'job': {
        'reference': {
            'projectId': GCP_PROJECT_ID,
            'jobId': DATAPROC_JOB_ID,
        },
        'placement': {
            'clusterName': CLUSTER_NAME
        }
    }
}
def _assert_dataproc_job_id(mock_hook, dataproc_task):
    """Execute *dataproc_task* against a stubbed hook and verify that the
    id of the submitted job template ends up on the operator."""
    job_template = MagicMock()
    job_template.build.return_value = DATAPROC_JOB_TO_SUBMIT
    mock_hook.return_value.create_job_template.return_value = job_template
    dataproc_task.execute(None)
    assert dataproc_task.dataproc_job_id == DATAPROC_JOB_ID
class DataprocClusterCreateOperatorTest(unittest.TestCase):
    """Unit tests for DataprocClusterCreateOperator."""
    def setUp(self):
        # instantiate two different test cases with different labels.
        self.labels = [LABEL1, LABEL2]
        self.dataproc_operators = []
        self.mock_conn = Mock()
        for labels in self.labels:
            self.dataproc_operators.append(
                DataprocClusterCreateOperator(
                    task_id=TASK_ID,
                    cluster_name=CLUSTER_NAME,
                    project_id=GCP_PROJECT_ID,
                    num_workers=NUM_WORKERS,
                    zone=GCE_ZONE,
                    network_uri=NETWORK_URI,
                    subnetwork_uri=SUBNETWORK_URI,
                    internal_ip_only=INTERNAL_IP_ONLY,
                    tags=TAGS,
                    storage_bucket=STORAGE_BUCKET,
                    image_version=IMAGE_VERSION,
                    master_machine_type=MASTER_MACHINE_TYPE,
                    master_disk_type=MASTER_DISK_TYPE,
                    master_disk_size=MASTER_DISK_SIZE,
                    worker_machine_type=WORKER_MACHINE_TYPE,
                    worker_disk_type=WORKER_DISK_TYPE,
                    worker_disk_size=WORKER_DISK_SIZE,
                    num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
                    labels=deepcopy(labels),
                    service_account_scopes=SERVICE_ACCOUNT_SCOPES,
                    idle_delete_ttl=IDLE_DELETE_TTL,
                    auto_delete_time=AUTO_DELETE_TIME,
                    auto_delete_ttl=AUTO_DELETE_TTL
                )
            )
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')
    def test_init(self):
        """Test DataProcClusterOperator instance is properly initialized."""
        for suffix, dataproc_operator in enumerate(self.dataproc_operators):
            self.assertEqual(dataproc_operator.cluster_name, CLUSTER_NAME)
            self.assertEqual(dataproc_operator.project_id, GCP_PROJECT_ID)
            self.assertEqual(dataproc_operator.num_workers, NUM_WORKERS)
            self.assertEqual(dataproc_operator.zone, GCE_ZONE)
            self.assertEqual(dataproc_operator.network_uri, NETWORK_URI)
            self.assertEqual(dataproc_operator.subnetwork_uri, SUBNETWORK_URI)
            self.assertEqual(dataproc_operator.tags, TAGS)
            self.assertEqual(dataproc_operator.storage_bucket, STORAGE_BUCKET)
            self.assertEqual(dataproc_operator.image_version, IMAGE_VERSION)
            self.assertEqual(dataproc_operator.master_machine_type, MASTER_MACHINE_TYPE)
            self.assertEqual(dataproc_operator.master_disk_size, MASTER_DISK_SIZE)
            self.assertEqual(dataproc_operator.master_disk_type, MASTER_DISK_TYPE)
            self.assertEqual(dataproc_operator.worker_machine_type, WORKER_MACHINE_TYPE)
            self.assertEqual(dataproc_operator.worker_disk_size, WORKER_DISK_SIZE)
            self.assertEqual(dataproc_operator.worker_disk_type, WORKER_DISK_TYPE)
            self.assertEqual(dataproc_operator.num_preemptible_workers,
                             NUM_PREEMPTIBLE_WORKERS)
            self.assertEqual(dataproc_operator.labels, self.labels[suffix])
            self.assertEqual(dataproc_operator.service_account_scopes,
                             SERVICE_ACCOUNT_SCOPES)
            self.assertEqual(dataproc_operator.idle_delete_ttl, IDLE_DELETE_TTL)
            self.assertEqual(dataproc_operator.auto_delete_time, AUTO_DELETE_TIME)
            self.assertEqual(dataproc_operator.auto_delete_ttl, AUTO_DELETE_TTL)
    def test_get_init_action_timeout(self):
        """Check the init-action timeout rendered by the operator."""
        # NOTE(review): "600s" presumably reflects the operator's default
        # init_action_timeout (no timeout was passed in setUp) -- confirm
        # against the operator's signature.
        for suffix, dataproc_operator in enumerate(self.dataproc_operators):
            timeout = dataproc_operator._get_init_action_timeout()
            self.assertEqual(timeout, "600s")
    def test_build_cluster_data(self):
        """Check the cluster-spec dict produced by _build_cluster_data()."""
        for suffix, dataproc_operator in enumerate(self.dataproc_operators):
            cluster_data = dataproc_operator._build_cluster_data()
            self.assertEqual(cluster_data['clusterName'], CLUSTER_NAME)
            self.assertEqual(cluster_data['projectId'], GCP_PROJECT_ID)
            self.assertEqual(cluster_data['config']['softwareConfig'],
                             {'imageVersion': IMAGE_VERSION})
            self.assertEqual(cluster_data['config']['configBucket'], STORAGE_BUCKET)
            self.assertEqual(cluster_data['config']['workerConfig']['numInstances'],
                             NUM_WORKERS)
            self.assertEqual(
                cluster_data['config']['secondaryWorkerConfig']['numInstances'],
                NUM_PREEMPTIBLE_WORKERS)
            self.assertEqual(
                cluster_data['config']['gceClusterConfig']['serviceAccountScopes'],
                SERVICE_ACCOUNT_SCOPES)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['internalIpOnly'],
                             INTERNAL_IP_ONLY)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['subnetworkUri'],
                             SUBNETWORK_URI)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['networkUri'],
                             NETWORK_URI)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['tags'],
                             TAGS)
            # TTL integers are rendered as "<n>s" strings; datetimes as RFC3339.
            self.assertEqual(cluster_data['config']['lifecycleConfig']['idleDeleteTtl'],
                             "321s")
            self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
                             "2017-06-07T00:00:00.000000Z")
            # test whether the default airflow-version label has been properly
            # set to the dataproc operator.
            merged_labels = {}
            merged_labels.update(self.labels[suffix])
            merged_labels.update({'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')})
            self.assertTrue(re.match(r'[a-z]([-a-z0-9]*[a-z0-9])?',
                                     cluster_data['labels']['airflow-version']))
            self.assertEqual(cluster_data['labels'], merged_labels)
    def test_build_cluster_data_with_autoDeleteTime(self):
        """auto_delete_time alone yields an absolute autoDeleteTime."""
        dataproc_operator = DataprocClusterCreateOperator(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            project_id=GCP_PROJECT_ID,
            num_workers=NUM_WORKERS,
            zone=GCE_ZONE,
            dag=self.dag,
            auto_delete_time=AUTO_DELETE_TIME,
        )
        cluster_data = dataproc_operator._build_cluster_data()
        self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
                         "2017-06-07T00:00:00.000000Z")
    def test_build_cluster_data_with_autoDeleteTtl(self):
        """auto_delete_ttl alone yields a relative autoDeleteTtl."""
        dataproc_operator = DataprocClusterCreateOperator(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            project_id=GCP_PROJECT_ID,
            num_workers=NUM_WORKERS,
            zone=GCE_ZONE,
            dag=self.dag,
            auto_delete_ttl=AUTO_DELETE_TTL,
        )
        cluster_data = dataproc_operator._build_cluster_data()
        self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTtl'],
                         "654s")
    def test_build_cluster_data_with_autoDeleteTime_and_autoDeleteTtl(self):
        """When both are given, auto_delete_time must win over the TTL."""
        dataproc_operator = DataprocClusterCreateOperator(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            project_id=GCP_PROJECT_ID,
            num_workers=NUM_WORKERS,
            zone=GCE_ZONE,
            dag=self.dag,
            auto_delete_time=AUTO_DELETE_TIME,
            auto_delete_ttl=AUTO_DELETE_TTL,
        )
        cluster_data = dataproc_operator._build_cluster_data()
        if 'autoDeleteTtl' in cluster_data['config']['lifecycleConfig']:
            self.fail("If 'auto_delete_time' and 'auto_delete_ttl' is set, " +
                      "only `auto_delete_time` is used")
        self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
                         "2017-06-07T00:00:00.000000Z")
    def test_init_with_image_version_and_custom_image_both_set(self):
        """image_version and custom_image are mutually exclusive."""
        with self.assertRaises(AssertionError):
            DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=GCP_PROJECT_ID,
                num_workers=NUM_WORKERS,
                zone=GCE_ZONE,
                dag=self.dag,
                image_version=IMAGE_VERSION,
                custom_image=CUSTOM_IMAGE
            )
    def test_init_with_custom_image(self):
        """custom_image must be expanded to a full image URI on both configs."""
        dataproc_operator = DataprocClusterCreateOperator(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            project_id=GCP_PROJECT_ID,
            num_workers=NUM_WORKERS,
            zone=GCE_ZONE,
            dag=self.dag,
            custom_image=CUSTOM_IMAGE
        )
        cluster_data = dataproc_operator._build_cluster_data()
        expected_custom_image_url = \
            'https://www.googleapis.com/compute/beta/projects/' \
            '{}/global/images/{}'.format(GCP_PROJECT_ID, CUSTOM_IMAGE)
        self.assertEqual(cluster_data['config']['masterConfig']['imageUri'],
                         expected_custom_image_url)
        self.assertEqual(cluster_data['config']['workerConfig']['imageUri'],
                         expected_custom_image_url)
    def test_build_single_node_cluster(self):
        """Zero workers must produce a single-node cluster property."""
        dataproc_operator = DataprocClusterCreateOperator(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            project_id=GCP_PROJECT_ID,
            num_workers=0,
            num_preemptible_workers=0,
            zone=GCE_ZONE,
            dag=self.dag
        )
        cluster_data = dataproc_operator._build_cluster_data()
        self.assertEqual(
            cluster_data['config']['softwareConfig']['properties']
            ['dataproc:dataproc.allow.zero.workers'], "true")
    def test_init_cluster_with_zero_workers_and_not_non_zero_preemtibles(self):
        """Zero workers with non-zero preemptibles is rejected."""
        with self.assertRaises(AssertionError):
            DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=GCP_PROJECT_ID,
                num_workers=0,
                num_preemptible_workers=2,
                zone=GCE_ZONE,
                dag=self.dag,
                image_version=IMAGE_VERSION,
            )
    def test_cluster_name_log_no_sub(self):
        """A literal cluster name is logged as-is before execution fails."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=GCP_PROJECT_ID,
                num_workers=NUM_WORKERS,
                zone=GCE_ZONE,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                # execute() is expected to blow up (TypeError) on the mocked
                # connection; the test only checks the log call made first.
                with self.assertRaises(TypeError):
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Creating cluster: %s', CLUSTER_NAME)
    def test_cluster_name_log_sub(self):
        """A templated cluster name is rendered before it is logged."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name='smoke-cluster-{{ ts_nodash }}',
                project_id=GCP_PROJECT_ID,
                num_workers=NUM_WORKERS,
                zone=GCE_ZONE,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                # Render the Jinja template manually, as the scheduler would.
                context = {'ts_nodash': 'testnodash'}
                rendered = dataproc_task.render_template(
                    'cluster_name',
                    getattr(dataproc_task, 'cluster_name'), context)
                setattr(dataproc_task, 'cluster_name', rendered)
                with self.assertRaises(TypeError):
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Creating cluster: %s', u'smoke-cluster-testnodash')
    def test_build_cluster_data_internal_ip_only_without_subnetwork(self):
        """internal_ip_only without a subnetwork_uri must be rejected."""
        def create_cluster_with_invalid_internal_ip_only_setup():
            # Given
            create_cluster = DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=GCP_PROJECT_ID,
                num_workers=NUM_WORKERS,
                zone=GCE_ZONE,
                dag=self.dag,
                internal_ip_only=True)
            # When
            create_cluster._build_cluster_data()
        # Then
        with self.assertRaises(AirflowException) as cm:
            create_cluster_with_invalid_internal_ip_only_setup()
        self.assertEqual(str(cm.exception),
                         "Set internal_ip_only to true only when you pass a subnetwork_uri.")
class DataprocClusterScaleOperatorTest(unittest.TestCase):
    """Unit tests for DataprocClusterScaleOperator."""
    def setUp(self):
        # Build a mock Google API client whose chained calls end in
        # execute() returning {'done': True} (operation finished).
        self.mock_execute = Mock()
        self.mock_execute.execute = Mock(return_value={'done': True})
        self.mock_get = Mock()
        self.mock_get.get = Mock(return_value=self.mock_execute)
        self.mock_operations = Mock()
        self.mock_operations.get = Mock(return_value=self.mock_get)
        self.mock_regions = Mock()
        self.mock_regions.operations = Mock(return_value=self.mock_operations)
        self.mock_projects = Mock()
        self.mock_projects.regions = Mock(return_value=self.mock_regions)
        self.mock_conn = Mock()
        self.mock_conn.projects = Mock(return_value=self.mock_projects)
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')
    def test_cluster_name_log_no_sub(self):
        """A literal cluster name is logged as-is before execution fails."""
        # NOTE(review): this test patches the hooks module while the sibling
        # test patches the operator module -- presumably intentional, verify.
        with patch('airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterScaleOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=GCP_PROJECT_ID,
                num_workers=NUM_WORKERS,
                num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                with self.assertRaises(TypeError):
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Scaling cluster: %s', CLUSTER_NAME)
    def test_cluster_name_log_sub(self):
        """A templated cluster name is rendered before it is logged."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterScaleOperator(
                task_id=TASK_ID,
                cluster_name='smoke-cluster-{{ ts_nodash }}',
                project_id=GCP_PROJECT_ID,
                num_workers=NUM_WORKERS,
                num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                # Render the Jinja template manually, as the scheduler would.
                context = {'ts_nodash': 'testnodash'}
                rendered = dataproc_task.render_template(
                    'cluster_name',
                    getattr(dataproc_task, 'cluster_name'), context)
                setattr(dataproc_task, 'cluster_name', rendered)
                with self.assertRaises(TypeError):
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Scaling cluster: %s', u'smoke-cluster-testnodash')
class DataprocClusterDeleteOperatorTest(unittest.TestCase):
    """Unit tests for DataprocClusterDeleteOperator."""
    def setUp(self):
        # Build a mock Google API client whose chained calls end in
        # execute() returning {'done': True} (operation finished).
        self.mock_execute = Mock()
        self.mock_execute.execute = Mock(return_value={'done': True})
        self.mock_get = Mock()
        self.mock_get.get = Mock(return_value=self.mock_execute)
        self.mock_operations = Mock()
        self.mock_operations.get = Mock(return_value=self.mock_get)
        self.mock_regions = Mock()
        self.mock_regions.operations = Mock(return_value=self.mock_operations)
        self.mock_projects = Mock()
        self.mock_projects.regions = Mock(return_value=self.mock_regions)
        self.mock_conn = Mock()
        self.mock_conn.projects = Mock(return_value=self.mock_projects)
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')
    def test_cluster_name_log_no_sub(self):
        """A literal cluster name is logged as-is before execution fails."""
        with patch('airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterDeleteOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=GCP_PROJECT_ID,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                with self.assertRaises(TypeError):
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Deleting cluster: %s', CLUSTER_NAME)
    def test_cluster_name_log_sub(self):
        """A templated cluster name is rendered before it is logged."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterDeleteOperator(
                task_id=TASK_ID,
                cluster_name='smoke-cluster-{{ ts_nodash }}',
                project_id=GCP_PROJECT_ID,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                # Render the Jinja template manually, as the scheduler would.
                context = {'ts_nodash': 'testnodash'}
                rendered = dataproc_task.render_template(
                    'cluster_name',
                    getattr(dataproc_task, 'cluster_name'), context)
                setattr(dataproc_task, 'cluster_name', rendered)
                with self.assertRaises(TypeError):
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Deleting cluster: %s',
                                             u'smoke-cluster-testnodash')
class DataProcHadoopOperatorTest(unittest.TestCase):
    """Unit tests for DataProcHadoopOperator."""

    @staticmethod
    def test_hook_correct_region():
        """The configured region must be forwarded to the hook's submit()."""
        with patch(HOOK) as mock_hook:
            task = DataProcHadoopOperator(task_id=TASK_ID, region=GCP_REGION)
            task.execute(None)
            mock_hook.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, GCP_REGION, mock.ANY)

    @staticmethod
    def test_dataproc_job_id_is_set():
        """execute() must record the submitted job's id on the operator."""
        with patch(HOOK) as mock_hook:
            task = DataProcHadoopOperator(task_id=TASK_ID)
            _assert_dataproc_job_id(mock_hook, task)
class DataProcHiveOperatorTest(unittest.TestCase):
    """Unit tests for DataProcHiveOperator."""

    @staticmethod
    def test_hook_correct_region():
        """The configured region must be forwarded to the hook's submit()."""
        with patch(HOOK) as mock_hook:
            task = DataProcHiveOperator(task_id=TASK_ID, region=GCP_REGION)
            task.execute(None)
            mock_hook.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, GCP_REGION, mock.ANY)

    @staticmethod
    def test_dataproc_job_id_is_set():
        """execute() must record the submitted job's id on the operator."""
        with patch(HOOK) as mock_hook:
            task = DataProcHiveOperator(task_id=TASK_ID)
            _assert_dataproc_job_id(mock_hook, task)
class DataProcPySparkOperatorTest(unittest.TestCase):
    """Unit tests for DataProcPySparkOperator."""

    @staticmethod
    def test_hook_correct_region():
        """The configured region must be forwarded to the hook's submit()."""
        with patch(HOOK) as mock_hook:
            task = DataProcPySparkOperator(
                task_id=TASK_ID, main=MAIN_URI, region=GCP_REGION)
            task.execute(None)
            mock_hook.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, GCP_REGION, mock.ANY)

    @staticmethod
    def test_dataproc_job_id_is_set():
        """execute() must record the submitted job's id on the operator."""
        with patch(HOOK) as mock_hook:
            task = DataProcPySparkOperator(task_id=TASK_ID, main=MAIN_URI)
            _assert_dataproc_job_id(mock_hook, task)
class DataProcSparkOperatorTest(unittest.TestCase):
    """Unit tests for DataProcSparkOperator."""

    @staticmethod
    def test_hook_correct_region():
        """The configured region must be forwarded to the hook's submit()."""
        with patch(HOOK) as mock_hook:
            task = DataProcSparkOperator(task_id=TASK_ID, region=GCP_REGION)
            task.execute(None)
            mock_hook.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, GCP_REGION, mock.ANY)

    @staticmethod
    def test_dataproc_job_id_is_set():
        """execute() must record the submitted job's id on the operator."""
        with patch(HOOK) as mock_hook:
            task = DataProcSparkOperator(task_id=TASK_ID)
            _assert_dataproc_job_id(mock_hook, task)
class DataprocWorkflowTemplateInstantiateOperatorTest(unittest.TestCase):
    """Unit tests for DataprocWorkflowTemplateInstantiateOperator."""
    def setUp(self):
        # Setup service.projects().regions().workflowTemplates().instantiate().execute()
        self.operation = {'name': 'operation', 'done': True}
        self.mock_execute = Mock()
        self.mock_execute.execute.return_value = self.operation
        self.mock_workflows = Mock()
        self.mock_workflows.instantiate.return_value = self.mock_execute
        self.mock_regions = Mock()
        self.mock_regions.workflowTemplates.return_value = self.mock_workflows
        self.mock_projects = Mock()
        self.mock_projects.regions.return_value = self.mock_regions
        self.mock_conn = Mock()
        self.mock_conn.projects.return_value = self.mock_projects
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')
    def test_workflow(self):
        """The operator must instantiate the named template and wait on it."""
        with patch(HOOK) as MockHook:
            hook = MockHook()
            hook.get_conn.return_value = self.mock_conn
            hook.wait.return_value = None
            dataproc_task = DataprocWorkflowTemplateInstantiateOperator(
                task_id=TASK_ID,
                project_id=GCP_PROJECT_ID,
                region=GCP_REGION,
                template_id=TEMPLATE_ID,
                dag=self.dag
            )
            dataproc_task.execute(None)
            # Fully-qualified template resource name expected by the API.
            template_name = (
                'projects/test-project-id/regions/test-region/'
                'workflowTemplates/template-id')
            self.mock_workflows.instantiate.assert_called_once_with(
                name=template_name,
                body=mock.ANY)
            hook.wait.assert_called_once_with(self.operation)
class DataprocWorkflowTemplateInstantiateInlineOperatorTest(unittest.TestCase):
    """Unit tests for DataprocWorkflowTemplateInstantiateInlineOperator."""

    def setUp(self):
        # Setup service.projects().regions().workflowTemplates().instantiateInline()
        # .execute()
        self.operation = {'name': 'operation', 'done': True}
        self.mock_execute = Mock()
        self.mock_execute.execute.return_value = self.operation
        self.mock_workflows = Mock()
        self.mock_workflows.instantiateInline.return_value = self.mock_execute
        self.mock_regions = Mock()
        self.mock_regions.workflowTemplates.return_value = self.mock_workflows
        self.mock_projects = Mock()
        self.mock_projects.regions.return_value = self.mock_regions
        self.mock_conn = Mock()
        self.mock_conn.projects.return_value = self.mock_projects
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')

    def test_inline_workflow(self):  # renamed: was test_iniline_workflow (typo)
        """The operator must pass the inline template through to the API
        verbatim and wait on the returned operation."""
        with patch(HOOK) as MockHook:
            hook = MockHook()
            hook.get_conn.return_value = self.mock_conn
            hook.wait.return_value = None
            # Minimal inline workflow: one managed cluster, one pig job.
            template = {
                "placement": {
                    "managed_cluster": {
                        "cluster_name": CLUSTER_NAME,
                        "config": {
                            "gce_cluster_config": {
                                "zone_uri": GCE_ZONE,
                            }
                        }
                    }
                },
                "jobs": [
                    {
                        "step_id": "say-hello",
                        "pig_job": {
                            "query": "sh echo hello"
                        }
                    }],
            }
            dataproc_task = DataprocWorkflowTemplateInstantiateInlineOperator(
                task_id=TASK_ID,
                project_id=GCP_PROJECT_ID,
                region=GCP_REGION,
                template=template,
                dag=self.dag
            )
            dataproc_task.execute(None)
            self.mock_workflows.instantiateInline.assert_called_once_with(
                parent='projects/test-project-id/regions/test-region',
                instanceId=mock.ANY,
                body=template)
            hook.wait.assert_called_once_with(self.operation)
| apache-2.0 |
# Public names re-exported by this module.
# (Repaired: dataset metadata had been fused onto the first line of this
# statement, corrupting it.)
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
    ]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(markup, try_encodings, is_html)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn then into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
    def pi(self, target, data):
        # Processing instructions are ignored.
        pass
    def data(self, content):
        # Text node: hand the string to the soup for accumulation.
        self.soup.handle_data(content)
    def doctype(self, name, pubid, system):
        # Flush any pending text, then record the doctype declaration.
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)
    def comment(self, content):
        "Handle comments as Comment objects."
        # Flush pending text, buffer the comment text, then flush it
        # again marked as a Comment node.
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        # Wrap the fragment in an XML declaration so it parses as a
        # complete document.
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    # HTML flavor of the lxml builder: inherits the XML event handlers
    # above but parses with lxml's HTMLParser instead.

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    def default_parser(self, encoding):
        # Return the parser *class*; parser_for() instantiates it.
        return etree.HTMLParser

    def feed(self, markup):
        """Feed the whole markup string to a fresh lxml HTML parser."""
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            # lxml rejected the markup outright (Python 2 except syntax).
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| gpl-3.0 |
abzaloid/kazakh-story-generator | webserver/lib/jinja2/__init__.py | 73 | 2565 | # -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'  # docstring markup used package-wide
__version__ = '2.9.6'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
make_logging_undefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined, select_autoescape
# Names re-exported at package level (``from jinja2 import *``); keep in
# sync with the imports above.
__all__ = [
    'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
    'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
    'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
    'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
    'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
    'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
    'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
    'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
    'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
    'select_autoescape',
]
def _patch_async():
    # Patch in async support only when the interpreter provides
    # asynchronous generators (checked by jinja2.utils.have_async_gen).
    from jinja2.utils import have_async_gen
    if have_async_gen:
        from jinja2.asyncsupport import patch_all
        patch_all()


_patch_async()
del _patch_async  # keep the package namespace clean
| mit |
40223101/2015final2 | static/Brython3.1.3-20150514-095342/Lib/decimal.py | 623 | 230510 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
# Names exported by ``from decimal import *``.
__all__ = [
    # Two major classes
    'Decimal', 'Context',

    # Contexts
    'DefaultContext', 'BasicContext', 'ExtendedContext',

    # Exceptions
    'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
    'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
    'FloatOperation',

    # Constants for use in setting up contexts
    'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
    'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',

    # Functions for manipulating contexts
    'setcontext', 'getcontext', 'localcontext',

    # Limits for the C version for compatibility
    'MAX_PREC',  'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',

    # C version: compile time choice that enables the thread local context
    'HAVE_THREADS'
]
__version__ = '1.70'    # Highest version of the spec this complies with
                        # See http://speleotrove.com/decimal/

import copy as _copy
import math as _math
import numbers as _numbers
import sys

try:
    # Named tuple representation used by Decimal.as_tuple().
    from collections import namedtuple as _namedtuple
    DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
    # Fall back to a plain tuple when collections is unavailable.
    DecimalTuple = lambda *args: args

# Rounding mode constants (plain strings, compared by equality).
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'

# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
    # 64-bit platform limits
    MAX_PREC = 999999999999999999
    MAX_EMAX = 999999999999999999
    MIN_EMIN = -999999999999999999
else:
    # 32-bit platform limits
    MAX_PREC = 425000000
    MAX_EMAX = 425000000
    MIN_EMIN = -425000000

MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
    """Base exception class.

    Used exceptions derive from this.
    If an exception derives from another exception besides this (such as
    Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
    called if the others are present.  This isn't actually used for
    anything, though.

    handle  -- Called when context._raise_error is called and the
               trap_enabler is not set.  First argument is self, second is the
               context.  More arguments can be given, those being after
               the explanation in _raise_error (For example,
               context._raise_error(NewError, '(-x)!', self._sign) would
               call NewError().handle(context, self._sign).)

    To define a new exception, it should be sufficient to have it derive
    from DecimalException.
    """
    def handle(self, context, *args):
        # Default: no special result.  Subclasses override this to build
        # the operation's result when the signal is not trapped.
        pass
class Clamped(DecimalException):
    """Exponent of a 0 changed to fit bounds.

    This occurs and signals clamped if the exponent of a result has been
    altered in order to fit the constraints of a specific concrete
    representation.  This may occur when the exponent of a zero result would
    be outside the bounds of a representation, or when a large normal
    number would have an encoded exponent that cannot be represented.  In
    this latter case, the exponent is reduced to fit and the corresponding
    number of zero digits are appended to the coefficient ("fold-down").
    """
    # No handle() override: the untrapped result is unchanged.
    # (Brython port placeholder; nothing further needed here.)
    pass
class InvalidOperation(DecimalException):
    """An invalid operation was performed.

    Various bad things cause this:

    Something creates a signaling NaN
    -INF + INF
    0 * (+-)INF
    (+-)INF / (+-)INF
    x % 0
    (+-)INF % x
    x._rescale( non-integer )
    sqrt(-x) , x > 0
    0 ** 0
    x ** (non-integer)
    x ** (+-)INF
    An operand is invalid

    The result of the operation after these is a quiet positive NaN,
    except when the cause is a signaling NaN, in which case the result is
    also a quiet NaN, but with the original sign, and an optional
    diagnostic information.
    """
    def handle(self, context, *args):
        if args:
            # A signaling NaN operand: quiet it, keeping sign/diagnostic.
            ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
            return ans._fix_nan(context)
        # Otherwise the untrapped result is a plain quiet NaN.
        return _NaN
class ConversionSyntax(InvalidOperation):
    """Trying to convert badly formed string.

    This occurs and signals invalid-operation if an string is being
    converted to a number and it does not conform to the numeric string
    syntax.  The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
    """Division by 0.

    This occurs and signals division-by-zero if division of a finite number
    by zero was attempted (during a divide-integer or divide operation, or a
    power operation with negative right-hand operand), and the dividend was
    not zero.

    The result of the operation is [sign,inf], where sign is the exclusive
    or of the signs of the operands for divide, or is 1 for an odd power of
    -0, for power.
    """
    def handle(self, context, sign, *args):
        # Untrapped result: an infinity carrying the computed sign.
        return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
    """Cannot perform the division adequately.

    This occurs and signals invalid-operation if the integer result of a
    divide-integer or remainder operation had too many digits (would be
    longer than precision).  The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
    """Undefined result of division.

    This occurs and signals invalid-operation if division by zero was
    attempted (during a divide-integer, divide, or remainder operation), and
    the dividend is also zero.  The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class Inexact(DecimalException):
    """Had to round, losing information.

    This occurs and signals inexact whenever the result of an operation is
    not exact (that is, it needed to be rounded and any discarded digits
    were non-zero), or if an overflow or underflow condition occurs.  The
    result in all cases is unchanged.

    The inexact signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) was inexact.
    """
    # No handle() override: the untrapped result is unchanged.
    # (Brython port placeholder; nothing further needed here.)
    pass
class InvalidContext(InvalidOperation):
    """Invalid context.  Unknown rounding, for example.

    This occurs and signals invalid-operation if an invalid context was
    detected during an operation.  This can occur if contexts are not checked
    on creation and either the precision exceeds the capability of the
    underlying concrete representation or an unknown or unsupported rounding
    was specified.  These aspects of the context need only be checked when
    the values are required to be used.  The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class Rounded(DecimalException):
    """Number got rounded (not necessarily changed during rounding).

    This occurs and signals rounded whenever the result of an operation is
    rounded (that is, some zero or non-zero digits were discarded from the
    coefficient), or if an overflow or underflow condition occurs.  The
    result in all cases is unchanged.

    The rounded signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) caused a loss of precision.
    """
    # No handle() override: the untrapped result is unchanged.
    # (Brython port placeholder; nothing further needed here.)
    pass
class Subnormal(DecimalException):
    """Exponent < Emin before rounding.

    This occurs and signals subnormal whenever the result of a conversion or
    operation is subnormal (that is, its adjusted exponent is less than
    Emin, before any rounding).  The result in all cases is unchanged.

    The subnormal signal may be tested (or trapped) to determine if a given
    or operation (or sequence of operations) yielded a subnormal result.
    """
    # No handle() override: the untrapped result is unchanged.
    # (Brython port placeholder; nothing further needed here.)
    pass
class Overflow(Inexact, Rounded):
    """Numerical overflow.

    This occurs and signals overflow if the adjusted exponent of a result
    (from a conversion or from an operation that is not an attempt to divide
    by zero), after rounding, would be greater than the largest value that
    can be handled by the implementation (the value Emax).

    The result depends on the rounding mode:

    For round-half-up and round-half-even (and for round-half-down and
    round-up, if implemented), the result of the operation is [sign,inf],
    where sign is the sign of the intermediate result.  For round-down, the
    result is the largest finite number that can be represented in the
    current precision, with the sign of the intermediate result.  For
    round-ceiling, the result is the same as for round-down if the sign of
    the intermediate result is 1, or is [0,inf] otherwise.  For round-floor,
    the result is the same as for round-down if the sign of the intermediate
    result is 0, or is [1,inf] otherwise.  In all cases, Inexact and Rounded
    will also be raised.
    """
    def handle(self, context, sign, *args):
        # Round-to-nearest and round-up modes overflow to infinity.
        if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
                                ROUND_HALF_DOWN, ROUND_UP):
            return _SignedInfinity[sign]
        if sign == 0:
            if context.rounding == ROUND_CEILING:
                return _SignedInfinity[sign]
            # Otherwise clamp to the largest representable finite value.
            return _dec_from_triple(sign, '9'*context.prec,
                            context.Emax-context.prec+1)
        if sign == 1:
            if context.rounding == ROUND_FLOOR:
                return _SignedInfinity[sign]
            return _dec_from_triple(sign, '9'*context.prec,
                             context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
    """Numerical underflow with result rounded to 0.

    This occurs and signals underflow if a result is inexact and the
    adjusted exponent of the result would be smaller (more negative) than
    the smallest value that can be handled by the implementation (the value
    Emin).  That is, the result is both inexact and subnormal.

    The result after an underflow will be a subnormal number rounded, if
    necessary, so that its exponent is not less than Etiny.  This may result
    in 0 with the sign of the intermediate result and an exponent of Etiny.

    In all cases, Inexact, Rounded, and Subnormal will also be raised.
    """
    # No handle() override: the untrapped result is unchanged.
    # (Brython port placeholder; nothing further needed here.)
    pass
class FloatOperation(DecimalException, TypeError):
    """Enable stricter semantics for mixing floats and Decimals.

    If the signal is not trapped (default), mixing floats and Decimals is
    permitted in the Decimal() constructor, context.create_decimal() and
    all comparison operators.  Both conversion and comparisons are exact.
    Any occurrence of a mixed operation is silently recorded by setting
    FloatOperation in the context flags.  Explicit conversions with
    Decimal.from_float() or context.create_decimal_from_float() do not
    set the flag.

    Otherwise (the signal is trapped), only equality comparisons and explicit
    conversions are silent.  All other mixed operations raise FloatOperation.
    """
    # No handle() override: the untrapped result is unchanged.
    # (Brython port placeholder; nothing further needed here.)
    pass
# List of public traps and flags (the keys of Context.flags/.traps).
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
            Underflow, InvalidOperation, Subnormal, FloatOperation]

# Map conditions (per the spec) to signals; the internal condition
# classes all surface to users as InvalidOperation.
_condition_map = {ConversionSyntax:InvalidOperation,
                  DivisionImpossible:InvalidOperation,
                  DivisionUndefined:InvalidOperation,
                  InvalidContext:InvalidOperation}

# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
                   ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
    import threading
except ImportError:
    # Python was compiled without threads; create a mock object instead
    class MockThreading(object):
        def local(self, sys=sys):
            # Use the module's own namespace as the "thread-local" store.
            return sys.modules[__name__]
    threading = MockThreading()
    del MockThreading

try:
    threading.local

except AttributeError:

    # To fix reloading, force it to create a new context
    # Old contexts have different exceptions in their dicts, making problems.
    if hasattr(threading.current_thread(), '__decimal_context__'):
        del threading.current_thread().__decimal_context__

    def setcontext(context):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never hand out the shared template contexts directly.
            context = context.copy()
            context.clear_flags()
        threading.current_thread().__decimal_context__ = context

    def getcontext():
        """Returns this thread's context.

        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return threading.current_thread().__decimal_context__
        except AttributeError:
            context = Context()
            threading.current_thread().__decimal_context__ = context
            return context

else:

    # threading.local is available: store the per-thread context there.
    local = threading.local()
    if hasattr(local, '__decimal_context__'):
        del local.__decimal_context__

    def getcontext(_local=local):
        """Returns this thread's context.

        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return _local.__decimal_context__
        except AttributeError:
            context = Context()
            _local.__decimal_context__ = context
            return context

    def setcontext(context, _local=local):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never hand out the shared template contexts directly.
            context = context.copy()
            context.clear_flags()
        _local.__decimal_context__ = context

    del threading, local        # Don't contaminate the namespace
def localcontext(ctx=None):
    """Return a context manager for a copy of the supplied context

    Uses a copy of the current context if no context is specified
    The returned context manager creates a local decimal context
    in a with statement:
        def sin(x):
             with localcontext() as ctx:
                 ctx.prec += 2
                 # Rest of sin calculation algorithm
                 # uses a precision 2 greater than normal
             return +s  # Convert result to normal precision

         def sin(x):
             with localcontext(ExtendedContext):
                 # Rest of sin calculation algorithm
                 # uses the Extended Context from the
                 # General Decimal Arithmetic Specification
             return +s  # Convert result to normal context

    >>> setcontext(DefaultContext)
    >>> print(getcontext().prec)
    28
    >>> with localcontext():
    ...     ctx = getcontext()
    ...     ctx.prec += 2
    ...     print(ctx.prec)
    ...
    30
    >>> with localcontext(ExtendedContext):
    ...     print(getcontext().prec)
    ...
    9
    >>> print(getcontext().prec)
    28
    """
    # Default to the current thread's context; _ContextManager copies it
    # on __enter__ and restores the previous context on __exit__.
    if ctx is None: ctx = getcontext()
    return _ContextManager(ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
import _jsre as re
_m=re.match("^\d*\.?\d*(e\+?\d*)?$", value)
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
    def __hash__(self):
        """x.__hash__() <==> hash(x)"""
        # In order to make sure that the hash of a Decimal instance
        # agrees with the hash of a numerically equal integer, float
        # or Fraction, we follow the rules for numeric hashes outlined
        # in the documentation. (See library docs, 'Built-in Types').
        if self._is_special:
            if self.is_snan():
                # Signaling NaNs are unhashable.
                raise TypeError('Cannot hash a signaling NaN value.')
            elif self.is_nan():
                # All quiet NaNs share a single hash value.
                return _PyHASH_NAN
            else:
                if self._sign:
                    return -_PyHASH_INF
                else:
                    return _PyHASH_INF
        # Finite case: hash is (coefficient * 10**exponent) mod the shared
        # numeric-hash modulus.  Negative exponents go through _PyHASH_10INV,
        # presumably the modular inverse of 10 -- TODO confirm where defined.
        if self._exp >= 0:
            exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
        else:
            exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
        hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
        ans = hash_ if self >= 0 else -hash_
        # CPython reserves -1 as an error indicator for __hash__, so remap.
        return -2 if ans == -1 else ans
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
    def __str__(self, eng=False, context=None):
        """Return string representation of the number in scientific notation.
        Captures all of the information in the underlying representation.
        With eng=True, engineering notation is used instead (exponent a
        multiple of 3).  The context determines whether 'e' or 'E'
        introduces the exponent.
        """
        sign = ['', '-'][self._sign]
        if self._is_special:
            # 'F' / 'n' / 'N' are the exponent markers for infinity,
            # quiet NaN and signaling NaN respectively; NaN payloads
            # (self._int) are appended verbatim.
            if self._exp == 'F':
                return sign + 'Infinity'
            elif self._exp == 'n':
                return sign + 'NaN' + self._int
            else: # self._exp == 'N'
                return sign + 'sNaN' + self._int
        # number of digits of self._int to left of decimal point
        leftdigits = self._exp + len(self._int)
        # dotplace is number of digits of self._int to the left of the
        # decimal point in the mantissa of the output string (that is,
        # after adjusting the exponent)
        if self._exp <= 0 and leftdigits > -6:
            # no exponent required
            dotplace = leftdigits
        elif not eng:
            # usual scientific notation: 1 digit on left of the point
            dotplace = 1
        elif self._int == '0':
            # engineering notation, zero
            dotplace = (leftdigits + 1) % 3 - 1
        else:
            # engineering notation, nonzero
            dotplace = (leftdigits - 1) % 3 + 1
        # Assemble integer part, fractional part and exponent from dotplace.
        if dotplace <= 0:
            intpart = '0'
            fracpart = '.' + '0'*(-dotplace) + self._int
        elif dotplace >= len(self._int):
            intpart = self._int+'0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace]
            fracpart = '.' + self._int[dotplace:]
        if leftdigits == dotplace:
            exp = ''
        else:
            if context is None:
                context = getcontext()
            exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
        return sign + intpart + fracpart + exp
    def to_eng_string(self, context=None):
        """Convert to engineering-type string.
        Engineering notation has an exponent which is a multiple of 3, so there
        are up to 3 digits left of the decimal place.
        Same rules for when in exponential and when as a value as in __str__.
        """
        # Pure delegation: __str__ holds all formatting logic; the context
        # only controls 'e' vs 'E' via its capitals attribute.
        return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
    def __add__(self, other, context=None):
        """Returns self + other.
        -INF + INF (or the reverse) cause InvalidOperation errors.
        Rounds the result to the given (or current) context.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        if context is None:
            context = getcontext()
        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans
            if self._isinfinity():
                # If both INF, same sign => same as both, opposite => error.
                if self._sign != other._sign and other._isinfinity():
                    return context._raise_error(InvalidOperation, '-INF + INF')
                return Decimal(self)
            if other._isinfinity():
                return Decimal(other) # Can't both be infinity here
        exp = min(self._exp, other._exp)
        negativezero = 0
        if context.rounding == ROUND_FLOOR and self._sign != other._sign:
            # If the answer is 0, the sign should be negative, in this case.
            negativezero = 1
        # Zero operands are handled separately so the result keeps the
        # ideal exponent without going through the digit-wise addition.
        if not self and not other:
            sign = min(self._sign, other._sign)
            if negativezero:
                sign = 1
            ans = _dec_from_triple(sign, '0', exp)
            ans = ans._fix(context)
            return ans
        if not self:
            # Bound the exponent so rescaling cannot produce more than
            # prec+1 digits before the final _fix.
            exp = max(exp, other._exp - context.prec-1)
            ans = other._rescale(exp, context.rounding)
            ans = ans._fix(context)
            return ans
        if not other:
            exp = max(exp, self._exp - context.prec-1)
            ans = self._rescale(exp, context.rounding)
            ans = ans._fix(context)
            return ans
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        op1, op2 = _normalize(op1, op2, context.prec)
        result = _WorkRep()
        if op1.sign != op2.sign:
            # Equal and opposite
            if op1.int == op2.int:
                ans = _dec_from_triple(negativezero, '0', exp)
                ans = ans._fix(context)
                return ans
            if op1.int < op2.int:
                op1, op2 = op2, op1
                # OK, now abs(op1) > abs(op2)
            if op1.sign == 1:
                result.sign = 1
                op1.sign, op2.sign = op2.sign, op1.sign
            else:
                result.sign = 0
                # So we know the sign, and op1 > 0.
        elif op1.sign == 1:
            result.sign = 1
            op1.sign, op2.sign = (0, 0)
        else:
            result.sign = 0
            # Now, op1 > abs(op2) > 0
        if op2.sign == 0:
            result.int = op1.int + op2.int
        else:
            result.int = op1.int - op2.int
        result.exp = op1.exp
        ans = Decimal(result)
        ans = ans._fix(context)
        return ans
    # Addition is commutative, so the reflected form is the same function.
    __radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
    def __mul__(self, other, context=None):
        """Return self * other.
        (+-) INF * 0 (or its reverse) raise InvalidOperation.
        The result is rounded to the given (or current) context.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        if context is None:
            context = getcontext()
        resultsign = self._sign ^ other._sign
        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans
            if self._isinfinity():
                if not other:
                    return context._raise_error(InvalidOperation, '(+-)INF * 0')
                return _SignedInfinity[resultsign]
            if other._isinfinity():
                if not self:
                    return context._raise_error(InvalidOperation, '0 * (+-)INF')
                return _SignedInfinity[resultsign]
        # Exponents add; the coefficient product is computed exactly below.
        resultexp = self._exp + other._exp
        # Special case for multiplying by zero
        if not self or not other:
            ans = _dec_from_triple(resultsign, '0', resultexp)
            # Fixing in case the exponent is out of bounds
            ans = ans._fix(context)
            return ans
        # Special case for multiplying by power of 10
        if self._int == '1':
            ans = _dec_from_triple(resultsign, other._int, resultexp)
            ans = ans._fix(context)
            return ans
        if other._int == '1':
            ans = _dec_from_triple(resultsign, self._int, resultexp)
            ans = ans._fix(context)
            return ans
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        # Exact integer multiply of the coefficients; _fix does the rounding.
        ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
        ans = ans._fix(context)
        return ans
    # Multiplication is commutative, so the reflected form is identical.
    __rmul__ = __mul__
    def __truediv__(self, other, context=None):
        """Return self / other, correctly rounded to the context precision."""
        other = _convert_other(other)
        if other is NotImplemented:
            return NotImplemented
        if context is None:
            context = getcontext()
        sign = self._sign ^ other._sign
        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans
            if self._isinfinity() and other._isinfinity():
                return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
            if self._isinfinity():
                return _SignedInfinity[sign]
            if other._isinfinity():
                # finite / INF underflows all the way to a clamped zero.
                context._raise_error(Clamped, 'Division by infinity')
                return _dec_from_triple(sign, '0', context.Etiny())
        # Special cases for zeroes
        if not other:
            if not self:
                return context._raise_error(DivisionUndefined, '0 / 0')
            return context._raise_error(DivisionByZero, 'x / 0', sign)
        if not self:
            exp = self._exp - other._exp
            coeff = 0
        else:
            # OK, so neither = 0, INF or NaN
            # Compute prec+1 significant digits so that _fix can round.
            shift = len(other._int) - len(self._int) + context.prec + 1
            exp = self._exp - other._exp - shift
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if shift >= 0:
                coeff, remainder = divmod(op1.int * 10**shift, op2.int)
            else:
                coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
            if remainder:
                # result is not exact; adjust to ensure correct rounding
                # (nudging off a multiple of 5 keeps the half-way tests in
                # _fix from seeing an apparently-exact tie)
                if coeff % 5 == 0:
                    coeff += 1
            else:
                # result is exact; get as close to ideal exponent as possible
                ideal_exp = self._exp - other._exp
                while exp < ideal_exp and coeff % 10 == 0:
                    coeff //= 10
                    exp += 1
        ans = _dec_from_triple(sign, str(coeff), exp)
        return ans._fix(context)
    def _divide(self, other, context):
        """Return (self // other, self % other), to context.prec precision.
        Assumes that neither self nor other is a NaN, that self is not
        infinite and that other is nonzero.
        The remainder keeps self's sign; the quotient carries the XOR of
        the operands' signs.  Raises DivisionImpossible (via the context)
        when the integer quotient needs more than prec digits.
        """
        sign = self._sign ^ other._sign
        if other._isinfinity():
            ideal_exp = self._exp
        else:
            ideal_exp = min(self._exp, other._exp)
        expdiff = self.adjusted() - other.adjusted()
        # |self/other| < 0.1, self == 0, or other infinite: quotient is 0
        # and the remainder is just self rescaled to the ideal exponent.
        if not self or other._isinfinity() or expdiff <= -2:
            return (_dec_from_triple(sign, '0', 0),
                    self._rescale(ideal_exp, context.rounding))
        if expdiff <= context.prec:
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            # Scale both coefficients to a common exponent before divmod.
            if op1.exp >= op2.exp:
                op1.int *= 10**(op1.exp - op2.exp)
            else:
                op2.int *= 10**(op2.exp - op1.exp)
            q, r = divmod(op1.int, op2.int)
            if q < 10**context.prec:
                return (_dec_from_triple(sign, str(q), 0),
                        _dec_from_triple(self._sign, str(r), ideal_exp))
        # Here the quotient is too large to be representable
        ans = context._raise_error(DivisionImpossible,
                                   'quotient too large in //, % or divmod')
        return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
    def __divmod__(self, other, context=None):
        """
        Return (self // other, self % other)
        NaN operands propagate into both slots of the result tuple.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        if context is None:
            context = getcontext()
        ans = self._check_nans(other, context)
        if ans:
            return (ans, ans)
        sign = self._sign ^ other._sign
        if self._isinfinity():
            if other._isinfinity():
                ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
                return ans, ans
            else:
                # INF // finite is a signed infinity; INF % x is invalid.
                return (_SignedInfinity[sign],
                        context._raise_error(InvalidOperation, 'INF % x'))
        if not other:
            if not self:
                ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
                return ans, ans
            else:
                return (context._raise_error(DivisionByZero, 'x // 0', sign),
                        context._raise_error(InvalidOperation, 'x % 0'))
        # _divide keeps the quotient within prec digits already, so only
        # the remainder needs a final rounding pass.
        quotient, remainder = self._divide(other, context)
        remainder = remainder._fix(context)
        return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
    def remainder_near(self, other, context=None):
        """
        Remainder nearest to 0- abs(remainder-near) <= other/2
        The quotient is rounded to the nearest integer (ties to even via
        the q&1 test below), so the remainder can take either sign.
        """
        if context is None:
            context = getcontext()
        other = _convert_other(other, raiseit=True)
        ans = self._check_nans(other, context)
        if ans:
            return ans
        # self == +/-infinity -> InvalidOperation
        if self._isinfinity():
            return context._raise_error(InvalidOperation,
                                        'remainder_near(infinity, x)')
        # other == 0 -> either InvalidOperation or DivisionUndefined
        if not other:
            if self:
                return context._raise_error(InvalidOperation,
                                            'remainder_near(x, 0)')
            else:
                return context._raise_error(DivisionUndefined,
                                            'remainder_near(0, 0)')
        # other = +/-infinity -> remainder = self
        if other._isinfinity():
            ans = Decimal(self)
            return ans._fix(context)
        # self = 0 -> remainder = self, with ideal exponent
        ideal_exponent = min(self._exp, other._exp)
        if not self:
            ans = _dec_from_triple(self._sign, '0', ideal_exponent)
            return ans._fix(context)
        # catch most cases of large or small quotient
        expdiff = self.adjusted() - other.adjusted()
        if expdiff >= context.prec + 1:
            # expdiff >= prec+1 => abs(self/other) > 10**prec
            return context._raise_error(DivisionImpossible)
        if expdiff <= -2:
            # expdiff <= -2 => abs(self/other) < 0.1
            ans = self._rescale(ideal_exponent, context.rounding)
            return ans._fix(context)
        # adjust both arguments to have the same exponent, then divide
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        if op1.exp >= op2.exp:
            op1.int *= 10**(op1.exp - op2.exp)
        else:
            op2.int *= 10**(op2.exp - op1.exp)
        q, r = divmod(op1.int, op2.int)
        # remainder is r*10**ideal_exponent; other is +/-op2.int *
        # 10**ideal_exponent. Apply correction to ensure that
        # abs(remainder) <= abs(other)/2
        # (the q&1 term breaks the exact-half tie towards an even quotient)
        if 2*r + (q&1) > op2.int:
            r -= op2.int
            q += 1
        if q >= 10**context.prec:
            return context._raise_error(DivisionImpossible)
        # result has same sign as self unless r is negative
        sign = self._sign
        if r < 0:
            sign = 1-sign
            r = -r
        ans = _dec_from_triple(sign, str(r), ideal_exponent)
        return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
    def real(self):
        # Complex-number protocol: a Decimal is its own real part.
        return self
    real = property(real)
    def imag(self):
        # Complex-number protocol: the imaginary part is always zero.
        return Decimal(0)
    imag = property(imag)
    def conjugate(self):
        # The complex conjugate of a real number is the number itself.
        return self
    def __complex__(self):
        # Goes through float, so it inherits __float__'s behaviour
        # (ValueError on sNaN) and may lose precision.
        return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
    def _fix(self, context):
        """Round if it is necessary to keep self within prec precision.
        Rounds and fixes the exponent. Does not raise on a sNaN.
        Arguments:
        self - Decimal instance
        context - context used.
        Raises (via the context) Clamped, Rounded, Inexact, Subnormal,
        Underflow and Overflow signals as appropriate.
        """
        if self._is_special:
            if self._isnan():
                # decapitate payload if necessary
                return self._fix_nan(context)
            else:
                # self is +/-Infinity; return unaltered
                return Decimal(self)
        # if self is zero then exponent should be between Etiny and
        # Emax if clamp==0, and between Etiny and Etop if clamp==1.
        Etiny = context.Etiny()
        Etop = context.Etop()
        if not self:
            exp_max = [context.Emax, Etop][context.clamp]
            new_exp = min(max(self._exp, Etiny), exp_max)
            if new_exp != self._exp:
                context._raise_error(Clamped)
                return _dec_from_triple(self._sign, '0', new_exp)
            else:
                return Decimal(self)
        # exp_min is the smallest allowable exponent of the result,
        # equal to max(self.adjusted()-context.prec+1, Etiny)
        exp_min = len(self._int) + self._exp - context.prec
        if exp_min > Etop:
            # overflow: exp_min > Etop iff self.adjusted() > Emax
            ans = context._raise_error(Overflow, 'above Emax', self._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            return ans
        self_is_subnormal = exp_min < Etiny
        if self_is_subnormal:
            exp_min = Etiny
        # round if self has too many digits
        if self._exp < exp_min:
            digits = len(self._int) + self._exp - exp_min
            if digits < 0:
                # value is smaller than half an ulp of the smallest
                # representable number; replace by a one-digit stand-in so
                # the rounding helpers still work.
                self = _dec_from_triple(self._sign, '1', exp_min-1)
                digits = 0
            # rounding helpers return -1/0/1; >0 means increment the kept
            # digits (see the block comment above _round_down).
            rounding_method = self._pick_rounding_function[context.rounding]
            changed = rounding_method(self, digits)
            coeff = self._int[:digits] or '0'
            if changed > 0:
                coeff = str(int(coeff)+1)
                if len(coeff) > context.prec:
                    # rounding up rippled past prec digits (e.g. 999 -> 1000)
                    coeff = coeff[:-1]
                    exp_min += 1
            # check whether the rounding pushed the exponent out of range
            if exp_min > Etop:
                ans = context._raise_error(Overflow, 'above Emax', self._sign)
            else:
                ans = _dec_from_triple(self._sign, coeff, exp_min)
            # raise the appropriate signals, taking care to respect
            # the precedence described in the specification
            if changed and self_is_subnormal:
                context._raise_error(Underflow)
            if self_is_subnormal:
                context._raise_error(Subnormal)
            if changed:
                context._raise_error(Inexact)
            context._raise_error(Rounded)
            if not ans:
                # raise Clamped on underflow to 0
                context._raise_error(Clamped)
            return ans
        if self_is_subnormal:
            context._raise_error(Subnormal)
        # fold down if clamp == 1 and self has too few digits
        if context.clamp == 1 and self._exp > Etop:
            context._raise_error(Clamped)
            self_padded = self._int + '0'*(self._exp - Etop)
            return _dec_from_triple(self._sign, self_padded, Etop)
        # here self was representable to begin with; return unchanged
        return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
    def _round_up(self, prec):
        """Rounds away from 0."""
        # Exact mirror of _round_down: 0 (exact) stays 0; -1 (digits
        # discarded) becomes 1, i.e. round away from zero.
        return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
    def __round__(self, n=None):
        """Round self to the nearest integer, or to a given precision.
        If only one argument is supplied, round a finite Decimal
        instance self to the nearest integer. If self is infinite or
        a NaN then a Python exception is raised. If self is finite
        and lies exactly halfway between two integers then it is
        rounded to the integer with even last digit.
        >>> round(Decimal('123.456'))
        123
        >>> round(Decimal('-456.789'))
        -457
        >>> round(Decimal('-3.0'))
        -3
        >>> round(Decimal('2.5'))
        2
        >>> round(Decimal('3.5'))
        4
        >>> round(Decimal('Inf'))
        Traceback (most recent call last):
        ...
        OverflowError: cannot round an infinity
        >>> round(Decimal('NaN'))
        Traceback (most recent call last):
        ...
        ValueError: cannot round a NaN
        If a second argument n is supplied, self is rounded to n
        decimal places using the rounding mode for the current
        context.
        For an integer n, round(self, -n) is exactly equivalent to
        self.quantize(Decimal('1En')).
        >>> round(Decimal('123.456'), 0)
        Decimal('123')
        >>> round(Decimal('123.456'), 2)
        Decimal('123.46')
        >>> round(Decimal('123.456'), -2)
        Decimal('1E+2')
        >>> round(Decimal('-Infinity'), 37)
        Decimal('NaN')
        >>> round(Decimal('sNaN123'), 0)
        Decimal('NaN123')
        """
        if n is not None:
            # two-argument form: use the equivalent quantize call
            if not isinstance(n, int):
                raise TypeError('Second argument to round should be integral')
            exp = _dec_from_triple(0, '1', -n)
            return self.quantize(exp)
        # one-argument form
        if self._is_special:
            if self.is_nan():
                raise ValueError("cannot round a NaN")
            else:
                raise OverflowError("cannot round an infinity")
        # Half-even rounding matches round() on builtin floats.
        return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
    def fma(self, other, third, context=None):
        """Fused multiply-add.
        Returns self*other+third with no rounding of the intermediate
        product self*other.
        self and other are multiplied together, with no rounding of
        the result. The third operand is then added to the result,
        and a single final rounding is performed.
        """
        other = _convert_other(other, raiseit=True)
        third = _convert_other(third, raiseit=True)
        # compute product; raise InvalidOperation if either operand is
        # a signaling NaN or if the product is zero times infinity.
        if self._is_special or other._is_special:
            if context is None:
                context = getcontext()
            # 'N' marks a signaling NaN, 'n' a quiet NaN, 'F' an infinity
            # (matching __str__'s exponent markers).
            if self._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', self)
            if other._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', other)
            if self._exp == 'n':
                product = self
            elif other._exp == 'n':
                product = other
            elif self._exp == 'F':
                if not other:
                    return context._raise_error(InvalidOperation,
                                                'INF * 0 in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
            elif other._exp == 'F':
                if not self:
                    return context._raise_error(InvalidOperation,
                                                '0 * INF in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
        else:
            # Exact product via integer arithmetic on the coefficients;
            # the single rounding happens inside __add__.
            product = _dec_from_triple(self._sign ^ other._sign,
                                       str(int(self._int) * int(other._int)),
                                       self._exp + other._exp)
        return product.__add__(third, context)
    def _power_modulo(self, other, modulo, context=None):
        """Three argument version of __pow__
        Computes pow(self, other, modulo) for integral operands, with
        the same restrictions as Python's built-in three-argument pow().
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        modulo = _convert_other(modulo)
        if modulo is NotImplemented:
            return modulo
        if context is None:
            context = getcontext()
        # deal with NaNs: if there are any sNaNs then first one wins,
        # (i.e. behaviour for NaNs is identical to that of fma)
        # (_isnan() presumably returns 2 for a signaling NaN -- the checks
        # below rely on that; TODO confirm against its definition)
        self_is_nan = self._isnan()
        other_is_nan = other._isnan()
        modulo_is_nan = modulo._isnan()
        if self_is_nan or other_is_nan or modulo_is_nan:
            if self_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            self)
            if other_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            other)
            if modulo_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            modulo)
            if self_is_nan:
                return self._fix_nan(context)
            if other_is_nan:
                return other._fix_nan(context)
            return modulo._fix_nan(context)
        # check inputs: we apply same restrictions as Python's pow()
        if not (self._isinteger() and
                other._isinteger() and
                modulo._isinteger()):
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument not allowed '
                                        'unless all arguments are integers')
        if other < 0:
            return context._raise_error(InvalidOperation,
                                        'pow() 2nd argument cannot be '
                                        'negative when 3rd argument specified')
        if not modulo:
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument cannot be 0')
        # additional restriction for decimal: the modulus must be less
        # than 10**prec in absolute value
        if modulo.adjusted() >= context.prec:
            return context._raise_error(InvalidOperation,
                                        'insufficient precision: pow() 3rd '
                                        'argument must not have more than '
                                        'precision digits')
        # define 0**0 == NaN, for consistency with two-argument pow
        # (even though it hurts!)
        if not other and not self:
            return context._raise_error(InvalidOperation,
                                        'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero ;'
                                        '0**0 is not defined')
        # compute sign of result
        if other._iseven():
            sign = 0
        else:
            sign = self._sign
        # convert modulo to a Python integer, and self and other to
        # Decimal integers (i.e. force their exponents to be >= 0)
        modulo = abs(int(modulo))
        base = _WorkRep(self.to_integral_value())
        exponent = _WorkRep(other.to_integral_value())
        # compute result using integer pow()
        base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
        # exponent is exponent.int * 10**exponent.exp, so raise base to
        # the 10th power exponent.exp times before the final pow().
        for i in range(exponent.exp):
            base = pow(base, 10, modulo)
        base = pow(base, exponent.int, modulo)
        return _dec_from_triple(sign, str(base), 0)
    def _power_exact(self, other, p):
        """Attempt to compute self**other exactly.

        Given Decimals self and other and an integer p, attempt to
        compute an exact result for the power self**other, with p
        digits of precision.  Return None if self**other is not
        exactly representable in p digits.

        Assumes that elimination of special cases has already been
        performed: self and other must both be nonspecial; self must
        be positive and not numerically equal to 1; other must be
        nonzero.  For efficiency, other._exp should not be too large,
        so that 10**abs(other._exp) is a feasible calculation.

        On success, returns a nonnegative Decimal (sign handling is
        the caller's job)."""

        # In the comments below, we write x for the value of self and y for the
        # value of other.  Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
        # and yc positive integers not divisible by 10.

        # The main purpose of this method is to identify the *failure*
        # of x**y to be exactly representable with as little effort as
        # possible.  So we look for cheap and easy tests that
        # eliminate the possibility of x**y being exact.  Only if all
        # these tests are passed do we go on to actually compute x**y.

        # Here's the main idea.  Express y as a rational number m/n, with m and
        # n relatively prime and n>0.  Then for x**y to be exactly
        # representable (at *any* precision), xc must be the nth power of a
        # positive integer and xe must be divisible by n.  If y is negative
        # then additionally xc must be a power of either 2 or 5, hence a power
        # of 2**n or 5**n.
        #
        # There's a limit to how small |y| can be: if y=m/n as above
        # then:
        #
        #  (1) if xc != 1 then for the result to be representable we
        #      need xc**(1/n) >= 2, and hence also xc**|y| >= 2.  So
        #      if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
        #      2**(1/|y|), hence xc**|y| < 2 and the result is not
        #      representable.
        #
        #  (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1.  Hence if
        #      |y| < 1/|xe| then the result is not representable.
        #
        # Note that since x is not equal to 1, at least one of (1) and
        # (2) must apply.  Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
        # 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
        #
        # There's also a limit to how large y can be, at least if it's
        # positive: the normalized result will have coefficient xc**y,
        # so if it's representable then xc**y < 10**p, and y <
        # p/log10(xc).  Hence if y*log10(xc) >= p then the result is
        # not exactly representable.

        # if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
        # so |y| < 1/xe and the result is not representable.
        # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
        # < 1/nbits(xc).

        # Normalize xc and yc so neither is divisible by 10; the factors
        # of 10 are absorbed into the exponents xe and ye.
        x = _WorkRep(self)
        xc, xe = x.int, x.exp
        while xc % 10 == 0:
            xc //= 10
            xe += 1

        y = _WorkRep(other)
        yc, ye = y.int, y.exp
        while yc % 10 == 0:
            yc //= 10
            ye += 1

        # case where xc == 1: result is 10**(xe*y), with xe*y
        # required to be an integer
        if xc == 1:
            xe *= yc
            # result is now 10**(xe * 10**ye);  xe * 10**ye must be integral
            while xe % 10 == 0:
                xe //= 10
                ye += 1
            if ye < 0:
                return None
            exponent = xe * 10**ye
            if y.sign == 1:
                exponent = -exponent
            # if other is a nonnegative integer, use ideal exponent
            if other._isinteger() and other._sign == 0:
                ideal_exponent = self._exp*int(other)
                zeros = min(exponent-ideal_exponent, p-1)
            else:
                zeros = 0
            return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)

        # case where y is negative: xc must be either a power
        # of 2 or a power of 5.
        if y.sign == 1:
            # A power of 2 ends in 2, 4, 6 or 8; a power of 5 (other than
            # 5**0 = 1, excluded since xc != 1 here) ends in 5.
            last_digit = xc % 10
            if last_digit in (2,4,6,8):
                # quick test for power of 2
                if xc & -xc != xc:
                    return None
                # now xc is a power of 2; e is its exponent
                e = _nbits(xc)-1

                # We now have:
                #
                #   x = 2**e * 10**xe, e > 0, and y < 0.
                #
                # The exact result is:
                #
                #   x**y = 5**(-e*y) * 10**(e*y + xe*y)
                #
                # provided that both e*y and xe*y are integers.  Note that if
                # 5**(-e*y) >= 10**p, then the result can't be expressed
                # exactly with p digits of precision.
                #
                # Using the above, we can guard against large values of ye.
                # 93/65 is an upper bound for log(10)/log(5), so if
                #
                #   ye >= len(str(93*p//65))
                #
                # then
                #
                #   -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
                #
                # so 5**(-e*y) >= 10**p, and the coefficient of the result
                # can't be expressed in p digits.

                # emax >= largest e such that 5**e < 10**p.
                emax = p*93//65
                if ye >= len(str(emax)):
                    return None

                # Find -e*y and -xe*y; both must be integers
                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 5**e

            elif last_digit == 5:
                # e >= log_5(xc) if xc is a power of 5; we have
                # equality all the way up to xc=5**2658
                e = _nbits(xc)*28//65
                xc, remainder = divmod(5**e, xc)
                if remainder:
                    return None
                while xc % 5 == 0:
                    xc //= 5
                    e -= 1

                # Guard against large values of ye, using the same logic as in
                # the 'xc is a power of 2' branch.  10/3 is an upper bound for
                # log(10)/log(2).
                emax = p*10//3
                if ye >= len(str(emax)):
                    return None

                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 2**e
            else:
                return None

            if xc >= 10**p:
                return None
            xe = -e-xe
            return _dec_from_triple(0, str(xc), xe)

        # now y is positive; find m and n such that y = m/n
        if ye >= 0:
            m, n = yc*10**ye, 1
        else:
            # Cheap non-representability tests (1) and (2) from the
            # discussion above, before computing m/n in lowest terms.
            if xe != 0 and len(str(abs(yc*xe))) <= -ye:
                return None
            xc_bits = _nbits(xc)
            if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
                return None
            m, n = yc, 10**(-ye)
            while m % 2 == n % 2 == 0:
                m //= 2
                n //= 2
            while m % 5 == n % 5 == 0:
                m //= 5
                n //= 5

        # compute nth root of xc*10**xe
        if n > 1:
            # if 1 < xc < 2**n then xc isn't an nth power
            if xc != 1 and xc_bits <= n:
                return None

            xe, rem = divmod(xe, n)
            if rem != 0:
                return None

            # compute nth root of xc using Newton's method
            a = 1 << -(-_nbits(xc)//n) # initial estimate
            while True:
                q, r = divmod(xc, a**(n-1))
                if a <= q:
                    break
                else:
                    a = (a*(n-1) + q)//n
            # a is now floor(xc**(1/n)); the root is exact iff a**n == xc
            if not (a == q and r == 0):
                return None
            xc = a

        # now xc*10**xe is the nth root of the original xc*10**xe
        # compute mth power of xc*10**xe

        # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
        # 10**p and the result is not representable.
        if xc > 1 and m > p*100//_log10_lb(xc):
            return None
        xc = xc**m
        xe *= m
        if xc > 10**p:
            return None

        # by this point the result *is* exactly representable
        # adjust the exponent to get as close as possible to the ideal
        # exponent, if necessary
        str_xc = str(xc)
        if other._isinteger() and other._sign == 0:
            ideal_exponent = self._exp*int(other)
            zeros = min(xe-ideal_exponent, p-len(str_xc))
        else:
            zeros = 0
        return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
    def __pow__(self, other, modulo=None, context=None):
        """Return self ** other [ % modulo].

        With two arguments, compute self**other.

        With three arguments, compute (self**other) % modulo.  For the
        three argument form, the following restrictions on the
        arguments hold:

         - all three arguments must be integral
         - other must be nonnegative
         - either self or other (or both) must be nonzero
         - modulo must be nonzero and must have at most p digits,
           where p is the context precision.

        If any of these restrictions is violated the InvalidOperation
        flag is raised.

        The result of pow(self, other, modulo) is identical to the
        result that would be obtained by computing (self**other) %
        modulo with unbounded precision, but is computed more
        efficiently.  It is always exact.
        """
        # three-argument pow is delegated entirely to _power_modulo
        if modulo is not None:
            return self._power_modulo(other, modulo, context)

        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        # either argument is a NaN => result is NaN
        ans = self._check_nans(other, context)
        if ans:
            return ans

        # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
        if not other:
            if not self:
                return context._raise_error(InvalidOperation, '0 ** 0')
            else:
                return _One

        # result has sign 1 iff self._sign is 1 and other is an odd integer
        result_sign = 0
        if self._sign == 1:
            if other._isinteger():
                if not other._iseven():
                    result_sign = 1
            else:
                # -ve**noninteger = NaN
                # (-0)**noninteger = 0**noninteger
                if self:
                    return context._raise_error(InvalidOperation,
                        'x ** y with x negative and y not an integer')
            # negate self, without doing any unwanted rounding
            self = self.copy_negate()

        # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
        if not self:
            if other._sign == 0:
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
        if self._isinfinity():
            if other._sign == 0:
                return _SignedInfinity[result_sign]
            else:
                return _dec_from_triple(result_sign, '0', 0)

        # 1**other = 1, but the choice of exponent and the flags
        # depend on the exponent of self, and on whether other is a
        # positive integer, a negative integer, or neither
        if self == _One:
            if other._isinteger():
                # exp = max(self._exp*max(int(other), 0),
                # 1-context.prec) but evaluating int(other) directly
                # is dangerous until we know other is small (other
                # could be 1e999999999)
                if other._sign == 1:
                    multiplier = 0
                elif other > context.prec:
                    multiplier = context.prec
                else:
                    multiplier = int(other)

                exp = self._exp * multiplier
                if exp < 1-context.prec:
                    exp = 1-context.prec
                    context._raise_error(Rounded)
            else:
                context._raise_error(Inexact)
                context._raise_error(Rounded)
                exp = 1-context.prec

            return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)

        # compute adjusted exponent of self
        self_adj = self.adjusted()

        # self ** infinity is infinity if self > 1, 0 if self < 1
        # self ** -infinity is infinity if self < 1, 0 if self > 1
        if other._isinfinity():
            if (other._sign == 0) == (self_adj < 0):
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # from here on, the result always goes through the call
        # to _fix at the end of this function.
        ans = None
        exact = False

        # crude test to catch cases of extreme overflow/underflow.  If
        # log10(self)*other >= 10**bound and bound >= len(str(Emax))
        # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
        # self**other >= 10**(Emax+1), so overflow occurs.  The test
        # for underflow is similar.
        bound = self._log10_exp_bound() + other.adjusted()
        if (self_adj >= 0) == (other._sign == 0):
            # self > 1 and other +ve, or self < 1 and other -ve
            # possibility of overflow
            if bound >= len(str(context.Emax)):
                ans = _dec_from_triple(result_sign, '1', context.Emax+1)
        else:
            # self > 1 and other -ve, or self < 1 and other +ve
            # possibility of underflow to 0
            Etiny = context.Etiny()
            if bound >= len(str(-Etiny)):
                ans = _dec_from_triple(result_sign, '1', Etiny-1)

        # try for an exact result with precision +1
        if ans is None:
            ans = self._power_exact(other, context.prec + 1)
            if ans is not None:
                # _power_exact returns an unsigned result; restore sign here
                if result_sign == 1:
                    ans = _dec_from_triple(1, ans._int, ans._exp)
                exact = True

        # usual case: inexact result, x**y computed directly as exp(y*log(x))
        if ans is None:
            p = context.prec
            x = _WorkRep(self)
            xc, xe = x.int, x.exp
            y = _WorkRep(other)
            yc, ye = y.int, y.exp
            if y.sign == 1:
                yc = -yc

            # compute correctly rounded result:  start with precision +3,
            # then increase precision until result is unambiguously roundable
            extra = 3
            while True:
                coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
                # loop until the coefficient is not within one ulp of a
                # rounding boundary (a multiple of 5 in the guard position)
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(result_sign, str(coeff), exp)

        # unlike exp, ln and log10, the power function respects the
        # rounding mode; no need to switch to ROUND_HALF_EVEN here

        # There's a difficulty here when 'other' is not an integer and
        # the result is exact.  In this case, the specification
        # requires that the Inexact flag be raised (in spite of
        # exactness), but since the result is exact _fix won't do this
        # for us.  (Correspondingly, the Underflow signal should also
        # be raised for subnormal results.)  We can't directly raise
        # these signals either before or after calling _fix, since
        # that would violate the precedence for signals.  So we wrap
        # the ._fix call in a temporary context, and reraise
        # afterwards.
        if exact and not other._isinteger():
            # pad with zeros up to length context.prec+1 if necessary; this
            # ensures that the Rounded signal will be raised.
            if len(ans._int) <= context.prec:
                expdiff = context.prec + 1 - len(ans._int)
                ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
                                       ans._exp-expdiff)

            # create a copy of the current context, with cleared flags/traps
            newcontext = context.copy()
            newcontext.clear_flags()
            for exception in _signals:
                newcontext.traps[exception] = 0

            # round in the new context
            ans = ans._fix(newcontext)

            # raise Inexact, and if necessary, Underflow
            newcontext._raise_error(Inexact)
            if newcontext.flags[Subnormal]:
                newcontext._raise_error(Underflow)

            # propagate signals to the original context; _fix could
            # have raised any of Overflow, Underflow, Subnormal,
            # Inexact, Rounded, Clamped.  Overflow needs the correct
            # arguments.  Note that the order of the exceptions is
            # important here.
            if newcontext.flags[Overflow]:
                context._raise_error(Overflow, 'above Emax', ans._sign)
            for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
                if newcontext.flags[exception]:
                    context._raise_error(exception)

        else:
            ans = ans._fix(context)

        return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
    def quantize(self, exp, rounding=None, context=None, watchexp=True):
        """Quantize self so its exponent is the same as that of exp.

        Similar to self._rescale(exp._exp) but with error checking.

        If watchexp is false the exponent checks against the context
        are skipped and only a plain rescale (with Inexact/Rounded
        signalling) is performed.
        """
        exp = _convert_other(exp, raiseit=True)

        if context is None:
            context = getcontext()
        if rounding is None:
            rounding = context.rounding

        if self._is_special or exp._is_special:
            ans = self._check_nans(exp, context)
            if ans:
                return ans

            if exp._isinfinity() or self._isinfinity():
                if exp._isinfinity() and self._isinfinity():
                    return Decimal(self)  # if both are inf, it is OK
                return context._raise_error(InvalidOperation,
                                        'quantize with one INF')

        # if we're not watching exponents, do a simple rescale
        if not watchexp:
            ans = self._rescale(exp._exp, rounding)
            # raise Inexact and Rounded where appropriate
            if ans._exp > self._exp:
                context._raise_error(Rounded)
                if ans != self:
                    context._raise_error(Inexact)
            return ans

        # exp._exp should be between Etiny and Emax
        if not (context.Etiny() <= exp._exp <= context.Emax):
            return context._raise_error(InvalidOperation,
                   'target exponent out of bounds in quantize')

        if not self:
            ans = _dec_from_triple(self._sign, '0', exp._exp)
            return ans._fix(context)

        self_adjusted = self.adjusted()
        if self_adjusted > context.Emax:
            return context._raise_error(InvalidOperation,
                                        'exponent of quantize result '
                                        'too large for current context')
        if self_adjusted - exp._exp + 1 > context.prec:
            return context._raise_error(InvalidOperation,
                                        'quantize result has too many '
                                        'digits for current context')

        ans = self._rescale(exp._exp, rounding)
        # re-check after rescaling: rounding may have added a digit
        # (e.g. 9.99 -> 10.0), pushing the result out of range
        if ans.adjusted() > context.Emax:
            return context._raise_error(InvalidOperation,
                                        'exponent of quantize result '
                                        'too large for current context')
        if len(ans._int) > context.prec:
            return context._raise_error(InvalidOperation,
                                        'quantize result has too many '
                                        'digits for current context')

        # raise appropriate flags
        if ans and ans.adjusted() < context.Emin:
            context._raise_error(Subnormal)
        if ans._exp > self._exp:
            if ans != self:
                context._raise_error(Inexact)
            context._raise_error(Rounded)

        # call to fix takes care of any necessary folddown, and
        # signals Clamped if necessary
        ans = ans._fix(context)
        return ans
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
    def sqrt(self, context=None):
        """Return the square root of self, correctly rounded to the
        context precision."""
        if context is None:
            context = getcontext()

        if self._is_special:
            ans = self._check_nans(context=context)
            if ans:
                return ans

            if self._isinfinity() and self._sign == 0:
                return Decimal(self)

        if not self:
            # exponent = self._exp // 2.  sqrt(-0) = -0
            ans = _dec_from_triple(self._sign, '0', self._exp // 2)
            return ans._fix(context)

        if self._sign == 1:
            return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')

        # At this point self represents a positive number.  Let p be
        # the desired precision and express self in the form c*100**e
        # with c a positive real number and e an integer, c and e
        # being chosen so that 100**(p-1) <= c < 100**p.  Then the
        # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
        # <= sqrt(c) < 10**p, so the closest representable Decimal at
        # precision p is n*10**e where n = round_half_even(sqrt(c)),
        # the closest integer to sqrt(c) with the even integer chosen
        # in the case of a tie.
        #
        # To ensure correct rounding in all cases, we use the
        # following trick: we compute the square root to an extra
        # place (precision p+1 instead of precision p), rounding down.
        # Then, if the result is inexact and its last digit is 0 or 5,
        # we increase the last digit to 1 or 6 respectively; if it's
        # exact we leave the last digit alone.  Now the final round to
        # p places (or fewer in the case of underflow) will round
        # correctly and raise the appropriate flags.

        # use an extra digit of precision
        prec = context.prec+1

        # write argument in the form c*100**e where e = self._exp//2
        # is the 'ideal' exponent, to be used if the square root is
        # exactly representable.  l is the number of 'digits' of c in
        # base 100, so that 100**(l-1) <= c < 100**l.
        op = _WorkRep(self)
        e = op.exp >> 1
        if op.exp & 1:
            # odd exponent: fold the spare factor of 10 into c
            c = op.int * 10
            l = (len(self._int) >> 1) + 1
        else:
            c = op.int
            l = len(self._int)+1 >> 1

        # rescale so that c has exactly prec base 100 'digits'
        shift = prec-l
        if shift >= 0:
            c *= 100**shift
            exact = True
        else:
            c, remainder = divmod(c, 100**-shift)
            exact = not remainder
        e -= shift

        # find n = floor(sqrt(c)) using Newton's method
        n = 10**prec
        while True:
            q = c//n
            if n <= q:
                break
            else:
                n = n + q >> 1
        exact = exact and n*n == c

        if exact:
            # result is exact; rescale to use ideal exponent e
            if shift >= 0:
                # assert n % 10**shift == 0
                n //= 10**shift
            else:
                n *= 10**-shift
            e += shift
        else:
            # result is not exact; fix last digit as described above
            if n % 5 == 0:
                n += 1

        ans = _dec_from_triple(0, str(n), e)

        # round, and fit to current context
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding

        return ans
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
    def compare_total(self, other, context=None):
        """Compares self to other using the abstract representations.

        This is not like the standard compare, which use their numerical
        value. Note that a total ordering is defined for all possible abstract
        representations.  Returns Decimal -1, 0 or 1.
        """
        other = _convert_other(other, raiseit=True)

        # if one is negative and the other is positive, it's easy
        if self._sign and not other._sign:
            return _NegativeOne
        if not self._sign and other._sign:
            return _One
        sign = self._sign

        # let's handle both NaN types
        self_nan = self._isnan()
        other_nan = other._isnan()
        if self_nan or other_nan:
            if self_nan == other_nan:
                # compare payloads as though they're integers
                self_key = len(self._int), self._int
                other_key = len(other._int), other._int
                if self_key < other_key:
                    if sign:
                        return _One
                    else:
                        return _NegativeOne
                if self_key > other_key:
                    if sign:
                        return _NegativeOne
                    else:
                        return _One
                return _Zero

            # Mixed NaN kinds (or NaN vs number): in the total ordering a
            # quiet NaN (code 1) outranks a signaling NaN (code 2), which
            # outranks any finite/infinite value; negatives are mirrored.
            if sign:
                if self_nan == 1:
                    return _NegativeOne
                if other_nan == 1:
                    return _One
                if self_nan == 2:
                    return _NegativeOne
                if other_nan == 2:
                    return _One
            else:
                if self_nan == 1:
                    return _One
                if other_nan == 1:
                    return _NegativeOne
                if self_nan == 2:
                    return _One
                if other_nan == 2:
                    return _NegativeOne

        if self < other:
            return _NegativeOne
        if self > other:
            return _One

        # numerically equal: order by exponent, with the direction
        # reversed for negative values
        if self._exp < other._exp:
            if sign:
                return _One
            else:
                return _NegativeOne
        if self._exp > other._exp:
            if sign:
                return _NegativeOne
            else:
                return _One
        return _Zero
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
    def exp(self, context=None):
        """Returns e ** self, correctly rounded to the context precision."""

        if context is None:
            context = getcontext()

        # exp(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # exp(-Infinity) = 0
        if self._isinfinity() == -1:
            return _Zero

        # exp(0) = 1
        if not self:
            return _One

        # exp(Infinity) = Infinity
        if self._isinfinity() == 1:
            return Decimal(self)

        # the result is now guaranteed to be inexact (the true
        # mathematical result is transcendental). There's no need to
        # raise Rounded and Inexact here---they'll always be raised as
        # a result of the call to _fix.
        p = context.prec
        adj = self.adjusted()

        # we only need to do any computation for quite a small range
        # of adjusted exponents---for example, -29 <= adj <= 10 for
        # the default context.  For smaller exponent the result is
        # indistinguishable from 1 at the given precision, while for
        # larger exponent the result either overflows or underflows.
        if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
            # overflow
            ans = _dec_from_triple(0, '1', context.Emax+1)
        elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
            # underflow to 0
            ans = _dec_from_triple(0, '1', context.Etiny()-1)
        elif self._sign == 0 and adj < -p:
            # p+1 digits; final round will raise correct flags
            ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
        elif self._sign == 1 and adj < -p-1:
            # p+1 digits; final round will raise correct flags
            ans = _dec_from_triple(0, '9'*(p+1), -p-1)
        # general case
        else:
            op = _WorkRep(self)
            c, e = op.int, op.exp
            if op.sign == 1:
                c = -c

            # compute correctly rounded result: increase precision by
            # 3 digits at a time until we get an unambiguously
            # roundable result
            extra = 3
            while True:
                coeff, exp = _dexp(c, e, p+extra)
                # loop until the coefficient is safely away from a
                # rounding boundary (multiple of 5 in the guard digit)
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(0, str(coeff), exp)

        # at this stage, ans should round correctly with *any*
        # rounding mode, not just with ROUND_HALF_EVEN
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding

        return ans
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
    def _ln_exp_bound(self):
        """Compute a lower bound for the adjusted exponent of self.ln().

        In other words, compute r such that self.ln() >= 10**r.  Assumes
        that self is finite and positive and that self != 1.
        """
        # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
        adj = self._exp + len(self._int) - 1
        if adj >= 1:
            # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
            return len(str(adj*23//10)) - 1
        if adj <= -2:
            # argument <= 0.1
            return len(str((-1-adj)*23//10)) - 1
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if adj == 0:
            # 1 < self < 10; ln(x) >= 1-1/x = (x-1)/x, bound its exponent
            # by comparing digit lengths of numerator and denominator
            num = str(c-10**-e)
            den = str(c)
            return len(num) - len(den) - (num < den)
        # adj == -1, 0.1 <= self < 1; |ln(x)| >= 1-x, same digit-length trick
        return e + len(str(10**-e - c)) - 1
    def ln(self, context=None):
        """Returns the natural (base e) logarithm of self."""
        if context is None:
            context = getcontext()

        # ln(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # ln(0.0) == -Infinity
        if not self:
            return _NegativeInfinity

        # ln(Infinity) = Infinity
        if self._isinfinity() == 1:
            return _Infinity

        # ln(1.0) == 0.0
        if self == _One:
            return _Zero

        # ln(negative) raises InvalidOperation
        if self._sign == 1:
            return context._raise_error(InvalidOperation,
                                        'ln of a negative value')

        # result is irrational, so necessarily inexact
        op = _WorkRep(self)
        c, e = op.int, op.exp
        p = context.prec

        # correctly rounded result: repeatedly increase precision by 3
        # until we get an unambiguously roundable result
        # (i.e. until the computed coefficient is not a near-halfway case)
        places = p - self._ln_exp_bound() + 2 # at least p+3 places
        while True:
            coeff = _dlog(c, e, places)
            # assert len(str(abs(coeff)))-p >= 1
            if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                break
            places += 3
        ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)

        # round with HALF_EVEN on a shallow copy, then restore the
        # caller's rounding mode (flags are shared with the caller)
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding
        return ans
    def _log10_exp_bound(self):
        """Compute a lower bound for the adjusted exponent of self.log10().

        In other words, find r such that self.log10() >= 10**r.
        Assumes that self is finite and positive and that self != 1.
        """
        # For x >= 10 or x < 0.1 we only need a bound on the integer
        # part of log10(self), and this comes directly from the
        # exponent of x.  For 0.1 <= x <= 10 we use the inequalities
        # 1-1/x <= log(x) <= x-1.  If x > 1 we have |log10(x)| >
        # (1-1/x)/2.31 > 0.  If x < 1 then |log10(x)| > (1-x)/2.31 > 0

        adj = self._exp + len(self._int) - 1
        if adj >= 1:
            # self >= 10
            return len(str(adj))-1
        if adj <= -2:
            # self < 0.1
            return len(str(-1-adj))-1
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if adj == 0:
            # 1 < self < 10: log10(self) > (c - 10**-e)/(2.31*c);
            # the string comparison adjusts for equal digit lengths
            num = str(c-10**-e)
            den = str(231*c)
            return len(num) - len(den) - (num < den) + 2
        # adj == -1, 0.1 <= self < 1
        num = str(10**-e-c)
        return len(num) + e - (num < "231") - 1
    def log10(self, context=None):
        """Returns the base 10 logarithm of self."""
        if context is None:
            context = getcontext()

        # log10(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # log10(0.0) == -Infinity
        if not self:
            return _NegativeInfinity

        # log10(Infinity) = Infinity
        if self._isinfinity() == 1:
            return _Infinity

        # log10(negative or -Infinity) raises InvalidOperation
        if self._sign == 1:
            return context._raise_error(InvalidOperation,
                                        'log10 of a negative value')

        # log10(10**n) = n
        if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
            # answer may need rounding
            ans = Decimal(self._exp + len(self._int) - 1)
        else:
            # result is irrational, so necessarily inexact
            op = _WorkRep(self)
            c, e = op.int, op.exp
            p = context.prec

            # correctly rounded result: repeatedly increase precision
            # until result is unambiguously roundable
            # (loop exits once coeff is not a near-halfway case)
            places = p-self._log10_exp_bound()+2
            while True:
                coeff = _dlog10(c, e, places)
                # assert len(str(abs(coeff)))-p >= 1
                if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                    break
                places += 3
            ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)

        # round with HALF_EVEN on a shallow copy, then restore the
        # caller's rounding mode (flags are shared with the caller)
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding
        return ans
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
    def next_minus(self, context=None):
        """Returns the largest representable number smaller than itself."""
        if context is None:
            context = getcontext()

        ans = self._check_nans(context=context)
        if ans:
            return ans

        if self._isinfinity() == -1:
            return _NegativeInfinity
        if self._isinfinity() == 1:
            # predecessor of +Inf is the largest finite number
            return _dec_from_triple(0, '9'*context.prec, context.Etop())

        # round self downward on a throwaway context; if rounding changes
        # the value, that rounded value is already the answer
        context = context.copy()
        context._set_rounding(ROUND_FLOOR)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # otherwise subtract one ulp-below-Etiny and let rounding do the rest
        return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)
    def next_plus(self, context=None):
        """Returns the smallest representable number larger than itself."""
        if context is None:
            context = getcontext()

        ans = self._check_nans(context=context)
        if ans:
            return ans

        if self._isinfinity() == 1:
            return _Infinity
        if self._isinfinity() == -1:
            # successor of -Inf is the most negative finite number
            return _dec_from_triple(1, '9'*context.prec, context.Etop())

        # round self upward on a throwaway context; if rounding changes
        # the value, that rounded value is already the answer
        context = context.copy()
        context._set_rounding(ROUND_CEILING)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # otherwise add one ulp-below-Etiny and let rounding do the rest
        return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)
    def next_toward(self, other, context=None):
        """Returns the number closest to self, in the direction towards other.

        The result is the closest representable number to self
        (excluding self) that is in the direction towards other,
        unless both have the same value.  If the two operands are
        numerically equal, then the result is a copy of self with the
        sign set to be the same as the sign of other.
        """
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        ans = self._check_nans(other, context)
        if ans:
            return ans

        comparison = self._cmp(other)
        if comparison == 0:
            # numerically equal: copy self, taking other's sign
            return self.copy_sign(other)

        if comparison == -1:
            ans = self.next_plus(context)
        else: # comparison == 1
            ans = self.next_minus(context)

        # decide which flags to raise using value of ans
        if ans._isinfinity():
            context._raise_error(Overflow,
                                 'Infinite result from next_toward',
                                 ans._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
        elif ans.adjusted() < context.Emin:
            context._raise_error(Underflow)
            context._raise_error(Subnormal)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            # if precision == 1 then we don't raise Clamped for a
            # result 0E-Etiny.
            if not ans:
                context._raise_error(Clamped)
        return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. the _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
    def __format__(self, specifier, context=None, _localeconv=None):
        """Format a Decimal instance according to the given specifier.

        The specifier should be a standard format specifier, with the
        form described in PEP 3101.  Formatting types 'e', 'E', 'f',
        'F', 'g', 'G', 'n' and '%' are supported.  If the formatting
        type is omitted it defaults to 'g' or 'G', depending on the
        value of context.capitals.
        """
        # Note: PEP 3101 says that if the type is not present then
        # there should be at least one digit after the decimal point.
        # We take the liberty of ignoring this requirement for
        # Decimal---it's presumably there to make sure that
        # format(float, '') behaves similarly to str(float).
        if context is None:
            context = getcontext()

        spec = _parse_format_specifier(specifier, _localeconv=_localeconv)

        # special values don't care about the type or precision
        if self._is_special:
            sign = _format_sign(self._sign, spec)
            body = str(self.copy_abs())
            return _format_align(sign, body, spec)

        # a type of None defaults to 'g' or 'G', depending on context
        if spec['type'] is None:
            spec['type'] = ['g', 'G'][context.capitals]

        # if type is '%', adjust exponent of self accordingly
        # (percent formatting displays self * 100)
        if spec['type'] == '%':
            self = _dec_from_triple(self._sign, self._int, self._exp+2)

        # round if necessary, taking rounding mode from the context
        rounding = context.rounding
        precision = spec['precision']
        if precision is not None:
            if spec['type'] in 'eE':
                self = self._round(precision+1, rounding)
            elif spec['type'] in 'fF%':
                self = self._rescale(-precision, rounding)
            elif spec['type'] in 'gG' and len(self._int) > precision:
                self = self._round(precision, rounding)
        # special case: zeros with a positive exponent can't be
        # represented in fixed point; rescale them to 0e0.
        if not self and self._exp > 0 and spec['type'] in 'fF%':
            self = self._rescale(0, rounding)

        # figure out placement of the decimal point
        # (dotplace is measured relative to the start of self._int)
        leftdigits = self._exp + len(self._int)
        if spec['type'] in 'eE':
            if not self and precision is not None:
                dotplace = 1 - precision
            else:
                dotplace = 1
        elif spec['type'] in 'fF%':
            dotplace = leftdigits
        elif spec['type'] in 'gG':
            if self._exp <= 0 and leftdigits > -6:
                dotplace = leftdigits
            else:
                dotplace = 1

        # find digits before and after decimal point, and get exponent
        if dotplace < 0:
            intpart = '0'
            fracpart = '0'*(-dotplace) + self._int
        elif dotplace > len(self._int):
            intpart = self._int + '0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace] or '0'
            fracpart = self._int[dotplace:]
        exp = leftdigits-dotplace

        # done with the decimal-specific stuff; hand over the rest
        # of the formatting to the _format_number function
        return _format_number(self._sign, intpart, fracpart, exp, spec)
def _dec_from_triple(sign, coefficient, exponent, special=False):
    """Build a Decimal directly from its internal fields.

    No validation, normalization (e.g. removal of leading zeros) or
    argument conversion takes place, so the caller must supply fields
    already in canonical internal form.

    This function is for *internal use only*.
    """
    result = object.__new__(Decimal)
    result._sign = sign
    result._int = coefficient
    result._exp = exponent
    result._is_special = special
    return result
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).  Registration only affects isinstance()
# and issubclass() checks; it adds no behavior to Decimal itself.
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
    """Context-manager helper backing localcontext().

    __enter__() installs a copy of the supplied decimal context as the
    current context and __exit__() restores whatever context was active
    before.
    """

    def __init__(self, new_context):
        # Copy up front so the caller's context object is never mutated.
        self.new_context = new_context.copy()

    def __enter__(self):
        self.saved_context = getcontext()
        setcontext(self.new_context)
        return self.new_context

    def __exit__(self, exc_type, exc_value, traceback):
        setcontext(self.saved_context)
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
    def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                 capitals=None, clamp=None, flags=None, traps=None,
                 _ignored_flags=None):
        # Set defaults; for everything except flags and _ignored_flags,
        # inherit from DefaultContext.
        try:
            dc = DefaultContext
        except NameError:
            # DefaultContext is not yet defined while DefaultContext itself
            # is being constructed; in that case all arguments are supplied.
            pass

        self.prec = prec if prec is not None else dc.prec
        self.rounding = rounding if rounding is not None else dc.rounding
        self.Emin = Emin if Emin is not None else dc.Emin
        self.Emax = Emax if Emax is not None else dc.Emax
        self.capitals = capitals if capitals is not None else dc.capitals
        self.clamp = clamp if clamp is not None else dc.clamp

        if _ignored_flags is None:
            self._ignored_flags = []
        else:
            self._ignored_flags = _ignored_flags

        if traps is None:
            self.traps = dc.traps.copy()
        elif not isinstance(traps, dict):
            # traps given as a list/set of signals: those listed are trapped
            self.traps = dict((s, int(s in traps)) for s in _signals + traps)
        else:
            self.traps = traps

        if flags is None:
            self.flags = dict.fromkeys(_signals, 0)
        elif not isinstance(flags, dict):
            # flags given as a list/set of signals: those listed are set
            self.flags = dict((s, int(s in flags)) for s in _signals + flags)
        else:
            self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
if not isinstance(d, dict):
raise TypeError("%s must be a signal dict" % d)
for key in d:
if not key in _signals:
raise KeyError("%s is not a valid signal dict" % d)
for key in _signals:
if not key in d:
raise KeyError("%s is not a valid signal dict" % d)
return object.__setattr__(self, name, d)
    def __setattr__(self, name, value):
        # Context exposes a fixed set of attributes; every assignment is
        # validated here and anything else is rejected.
        if name == 'prec':
            return self._set_integer_check(name, value, 1, 'inf')
        elif name == 'Emin':
            return self._set_integer_check(name, value, '-inf', 0)
        elif name == 'Emax':
            return self._set_integer_check(name, value, 0, 'inf')
        elif name == 'capitals':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'clamp':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'rounding':
            if not value in _rounding_modes:
                # raise TypeError even for strings to have consistency
                # among various implementations.
                raise TypeError("%s: invalid rounding mode" % value)
            return object.__setattr__(self, name, value)
        elif name == 'flags' or name == 'traps':
            return self._set_signal_dict(name, value)
        elif name == '_ignored_flags':
            return object.__setattr__(self, name, value)
        else:
            raise AttributeError(
                "'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
    def _raise_error(self, condition, explanation = None, *args):
        """Handles an error

        If the flag is in _ignored_flags, returns the default response.
        Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it reraises the exception.  Otherwise, it returns
        the default value after setting the flag.
        """
        # map a sub-condition (e.g. ConversionSyntax) onto its parent signal
        error = _condition_map.get(condition, condition)
        if error in self._ignored_flags:
            # Don't touch the flag
            return error().handle(self, *args)

        self.flags[error] = 1
        if not self.traps[error]:
            # The errors define how to handle themselves.
            return condition().handle(self, *args)

        # Errors should only be risked on copies of the context
        # self._ignored_flags = []
        raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding= type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
a =_convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
def scaleb (self, a, b):
"""Returns the first operand after adding the second value its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####

# Bit length of a non-negative integer (bits needed to represent it);
# used by _iexp to pick the argument-reduction count R.
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
def _ilog(x, M, L = 8):
    """Integer approximation to M*log(x/M), with absolute error boundable
    in terms only of x/M.

    Given positive integers x and M, return an integer approximation to
    M * log(x/M).  For L = 8 and 0.1 <= x/M <= 10 the difference
    between the approximation and the exact result is at most 22.  For
    L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15.  In
    both cases these are upper bounds on the error; it will usually be
    much smaller.

    L controls how far the argument is reduced before the Taylor series
    is applied (reduce until |y| < 2**-L in the fixed-point scale).
    """
    # The basic algorithm is the following: let log1p be the function
    # log1p(x) = log(1+x).  Then log(x/M) = log1p((x-M)/M).  We use
    # the reduction
    #
    #    log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
    #
    # repeatedly until the argument to log1p is small (< 2**-L in
    # absolute value).  For small y we can use the Taylor series
    # expansion
    #
    #    log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
    #
    # truncating at T such that y**T is small enough.  The whole
    # computation is carried out in a form of fixed-point arithmetic,
    # with a real number z being represented by an integer
    # approximation to z*M.  To avoid loss of precision, the y below
    # is actually an integer approximation to 2**R*y*M, where R is the
    # number of reductions performed so far.
    y = x-M
    # argument reduction; R = number of reductions performed
    R = 0
    while (R <= L and abs(y) << L-R >= M or
           R > L and abs(y) >> R-L >= M):
        y = _div_nearest((M*y) << 1,
                         M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
        R += 1
    # Taylor series with T terms; T chosen so the truncation error is
    # negligible at this precision (~10/3 decimal digits per term at L=8).
    T = -int(-10*len(str(M))//(3*L))
    yshift = _rshift_nearest(y, R)
    w = _div_nearest(M, T)
    # Horner-style evaluation of the alternating log1p series.
    for k in range(T-1, 0, -1):
        w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
    return _div_nearest(w*y, M)
def _dlog10(c, e, p):
    """Given integers c, e and p with c > 0, p >= 0, compute an integer
    approximation to 10**p * log10(c*10**e), with an absolute error of
    at most 1.  Assumes that c*10**e is not exactly 1."""
    # increase precision by 2; compensate for this by dividing
    # final result by 100
    p += 2
    # write c*10**e as d*10**f with either:
    #   f >= 0 and 1 <= d <= 10, or
    #   f <= 0 and 0.1 <= d <= 1.
    # Thus for c*10**e close to 1, f = 0
    l = len(str(c))
    f = e+l - (e+l >= 1)
    if p > 0:
        M = 10**p
        k = e+p-f
        # scale c so that it represents d at precision p (error <= 0.5)
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k)
        log_d = _ilog(c, M) # error < 5 + 22 = 27
        log_10 = _log10_digits(p) # error < 1
        # log10(d) = log(d)/log(10)
        log_d = _div_nearest(log_d*M, log_10)
        log_tenpower = f*M # exact
    else:
        # p <= 0 after the +2 bump: the whole answer rounds to ~f/10**-p
        log_d = 0  # error < 2.31
        log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
    # undo the two extra digits of working precision
    return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
    """Given integers c, e and p with c > 0, compute an integer
    approximation to 10**p * log(c*10**e), with an absolute error of
    at most 1.  Assumes that c*10**e is not exactly 1."""
    # Increase precision by 2. The precision increase is compensated
    # for at the end with a division by 100.
    p += 2
    # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
    # or f <= 0 and 0.1 <= d <= 1.  Then we can compute 10**p * log(c*10**e)
    # as 10**p * log(d) + 10**p*f * log(10).
    l = len(str(c))
    f = e+l - (e+l >= 1)
    # compute approximation to 10**p*log(d), with error < 27
    if p > 0:
        k = e+p-f
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k)  # error of <= 0.5 in c
        # _ilog magnifies existing error in c by a factor of at most 10
        log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
    else:
        # p <= 0: just approximate the whole thing by 0; error < 2.31
        log_d = 0
    # compute approximation to f*10**p*log(10), with error < 11.
    if f:
        extra = len(str(abs(f)))-1
        if p + extra >= 0:
            # error in f * _log10_digits(p+extra) < |f| * 1 = |f|
            # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
            f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
        else:
            f_log_ten = 0
    else:
        f_log_ten = 0
    # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
    return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
    """Class to compute, store, and allow retrieval of, digits of the
    constant log(10) = 2.302585....  This constant is needed by
    Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
    def __init__(self):
        # Seed with 47 precomputed digits; more are computed on demand.
        self.digits = "23025850929940456840179914546843642076011014886"

    def getdigits(self, p):
        """Given an integer p >= 0, return floor(10**p)*log(10).

        For example, self.getdigits(3) returns 2302.
        """
        # digits are stored as a string, for quick conversion to
        # integer in the case that we've already computed enough
        # digits; the stored digits should always be correct
        # (truncated, not rounded to nearest).
        if p < 0:
            raise ValueError("p should be nonnegative")
        if p >= len(self.digits):
            # compute p+3, p+6, p+9, ... digits; continue until at
            # least one of the extra digits is nonzero
            extra = 3
            while True:
                # compute p+extra digits, correct to within 1ulp
                M = 10**(p+extra+2)
                digits = str(_div_nearest(_ilog(10*M, M), 100))
                if digits[-extra:] != '0'*extra:
                    break
                extra += 3
            # keep all reliable digits so far; remove trailing zeros
            # and next nonzero digit
            self.digits = digits.rstrip('0')[:-1]
        return int(self.digits[:p+1])

# Module-level accessor bound to a single shared memoizing instance.
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
    """Given integers x and M, M > 0, such that x/M is small in absolute
    value, compute an integer approximation to M*exp(x/M).  For 0 <=
    x/M <= 2.4, the absolute error in the result is bounded by 60 (and
    is usually much smaller)."""
    # Algorithm: to compute exp(z) for a real number z, first divide z
    # by a suitable power R of 2 so that |z/2**R| < 2**-L.  Then
    # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
    # series
    #
    #     expm1(x) = x + x**2/2! + x**3/3! + ...
    #
    # Now use the identity
    #
    #     expm1(2x) = expm1(x)*(expm1(x)+2)
    #
    # R times to compute the sequence expm1(z/2**R),
    # expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
    # Find R such that x/2**R/M <= 2**-L
    R = _nbits((x<<L)//M)
    # Taylor series.  (2**L)**T > M
    T = -int(-10*len(str(M))//(3*L))
    y = _div_nearest(x, T)
    Mshift = M<<R
    # Horner evaluation of the expm1 Taylor series in fixed point.
    for i in range(T-1, 0, -1):
        y = _div_nearest(x*(Mshift + y), Mshift * i)
    # Expansion: apply the doubling identity R times to undo the reduction.
    for k in range(R-1, -1, -1):
        Mshift = M<<(k+2)
        y = _div_nearest(y*(y+Mshift), Mshift)
    # y now approximates M*expm1(x/M); add M to get M*exp(x/M).
    return M+y
def _dexp(c, e, p):
    """Compute an approximation to exp(c*10**e), with p decimal places of
    precision.

    Returns integers d, f such that:

      10**(p-1) <= d <= 10**p, and
      (d-1)*10**f < exp(c*10**e) < (d+1)*10**f

    In other words, d*10**f is an approximation to exp(c*10**e) with p
    digits of precision, and with an error in d of at most 1.  This is
    almost, but not quite, the same as the error being < 1ulp: when d
    = 10**(p-1) the error could be up to 10 ulp."""
    # we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
    p += 2
    # compute log(10) with extra precision = adjusted exponent of c*10**e
    extra = max(0, e + len(str(c)) - 1)
    q = p + extra
    # compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
    # rounding down
    shift = e+q
    if shift >= 0:
        cshift = c*10**shift
    else:
        cshift = c//10**-shift
    # exp(c*10**e) = 10**quot * exp(rem/10**q * log(10)); quot is the
    # integer part of the base-10 exponent of the result.
    quot, rem = divmod(cshift, _log10_digits(q))
    # reduce remainder back to original precision
    rem = _div_nearest(rem, 10**extra)
    # error in result of _iexp < 120;  error after division < 0.62
    return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
    """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
    y = yc*10**ye, compute x**y.  Returns a pair of integers (c, e) such that:

      10**(p-1) <= c <= 10**p, and
      (c-1)*10**e < x**y < (c+1)*10**e

    in other words, c*10**e is an approximation to x**y with p digits
    of precision, and with an error in c of at most 1.  (This is
    almost, but not quite, the same as the error being < 1ulp: when c
    == 10**(p-1) we can only guarantee error < 10ulp.)

    We assume that: x is positive and not equal to 1, and y is nonzero.
    """
    # Find b such that 10**(b-1) <= |y| <= 10**b
    b = len(str(abs(yc))) + ye
    # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
    lxc = _dlog(xc, xe, p+b+1)
    # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
    shift = ye-b
    if shift >= 0:
        pc = lxc*yc*10**shift
    else:
        pc = _div_nearest(lxc*yc, 10**-shift)
    if pc == 0:
        # we prefer a result that isn't exactly 1; this makes it
        # easier to compute a correctly rounded result in __pow__
        if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
            coeff, exp = 10**(p-1)+1, 1-p
        else:
            coeff, exp = 10**p-1, -p
    else:
        # x**y = exp(y*log(x)); compute with one extra digit, then round.
        coeff, exp = _dexp(pc, -(p+1), p+1)
        coeff = _div_nearest(coeff, 10)
        exp += 1
    return coeff, exp
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
##### Helper Functions ####################################################

def _convert_other(other, raiseit=False, allow_float=False):
    """Coerce `other` to a Decimal for use in an implicit construction.

    Returns the Decimal, or NotImplemented when the conversion is not
    permitted (unless raiseit is true, in which case TypeError is raised).
    If allow_float is true, conversion from float is allowed; this is
    used by the comparison methods (__eq__ and friends).
    """
    converted = None
    if isinstance(other, Decimal):
        converted = other
    elif isinstance(other, int):
        converted = Decimal(other)
    elif allow_float and isinstance(other, float):
        converted = Decimal.from_float(other)

    if converted is not None:
        return converted
    if raiseit:
        raise TypeError("Unable to convert %s to Decimal" % other)
    return NotImplemented
def _convert_for_comparison(self, other, equality_op=False):
    """Given a Decimal instance self and a Python object other, return
    a pair (s, o) of Decimal instances such that "s op o" is
    equivalent to "self op other" for any of the 6 comparison
    operators "op".

    Returns (NotImplemented, NotImplemented) when other's type is not
    comparable with Decimal.
    """
    if isinstance(other, Decimal):
        return self, other
    # Comparison with a Rational instance (also includes integers):
    # self op n/d <=> self*d op n (for n and d integers, d positive).
    # A NaN or infinity can be left unchanged without affecting the
    # comparison result.
    if isinstance(other, _numbers.Rational):
        if not self._is_special:
            # Scale self's coefficient by the denominator so both sides
            # can be compared as integers times the same power of ten.
            self = _dec_from_triple(self._sign,
                                    str(int(self._int) * other.denominator),
                                    self._exp)
        return self, Decimal(other.numerator)
    # Comparisons with float and complex types.  == and != comparisons
    # with complex numbers should succeed, returning either True or False
    # as appropriate.  Other comparisons return NotImplemented.
    if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
        other = other.real
    if isinstance(other, float):
        context = getcontext()
        # Mixing float and Decimal always records FloatOperation; for
        # ordering comparisons it is raised as a signal instead.
        if equality_op:
            context.flags[FloatOperation] = 1
        else:
            context._raise_error(FloatOperation,
                "strict semantics for mixing floats and Decimals are enabled")
        return self, Decimal.from_float(other)
    return NotImplemented, NotImplemented
##### Setup Specific Contexts ############################################

# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
# NOTE(review): prec=17 / Emax=308 / Emin=-324 match IEEE-754 double range
# rather than CPython's defaults (prec=28, Emax=999999) -- presumably tuned
# for this JS-hosted port; confirm before "fixing" to CPython's values.
DefaultContext = Context(
        prec=17, rounding=ROUND_HALF_EVEN,
        traps=[DivisionByZero, Overflow, InvalidOperation],
        flags=[],
        Emax=308,
        Emin=-324,
        capitals=1,
        clamp=0
)

# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
        prec=9, rounding=ROUND_HALF_UP,
        traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
        flags=[],
)

ExtendedContext = Context(
        prec=9, rounding=ROUND_HALF_EVEN,
        traps=[],
        flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
import _jsre as re
# Matchers over digit strings used during rounding decisions:
# _all_zeros  -> the remaining digits are all zero (result is exact)
# _exact_half -> the remaining digits are exactly one half (5 then zeros)
# NOTE(review): _jsre appears to be a JavaScript-regex shim specific to this
# port (the stdlib uses `re` here) -- confirm it supports these patterns.
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
    """Parse and validate a format specifier.

    Turns a standard numeric format specifier into a dict, with the
    following entries:

      fill: fill character to pad field to minimum width
      align: alignment type, either '<', '>', '=' or '^'
      sign: either '+', '-' or ' '
      minimumwidth: nonnegative integer giving minimum width
      zeropad: boolean, indicating whether to pad with zeros
      thousands_sep: string to use as thousands separator, or ''
      grouping: grouping for thousands separators, in format
        used by localeconv
      decimal_point: string to use for decimal point
      precision: nonnegative integer giving precision, or None
      type: one of the characters 'eEfFgG%', or None

    _localeconv may be supplied (for testing) to stand in for
    locale.localeconv() when the 'n' type is used.
    """
    # NOTE(review): _parse_format_specifier_regex is commented out above
    # (along with the `re` import that built it), so calling this function
    # as the file stands would raise NameError -- confirm whether
    # __format__ support is intentionally disabled in this port.
    m = _parse_format_specifier_regex.match(format_spec)
    if m is None:
        raise ValueError("Invalid format specifier: " + format_spec)
    # get the dictionary
    format_dict = m.groupdict()
    # zeropad; defaults for fill and alignment.  If zero padding
    # is requested, the fill and align fields should be absent.
    fill = format_dict['fill']
    align = format_dict['align']
    format_dict['zeropad'] = (format_dict['zeropad'] is not None)
    if format_dict['zeropad']:
        if fill is not None:
            raise ValueError("Fill character conflicts with '0'"
                             " in format specifier: " + format_spec)
        if align is not None:
            raise ValueError("Alignment conflicts with '0' in "
                             "format specifier: " + format_spec)
    format_dict['fill'] = fill or ' '
    # PEP 3101 originally specified that the default alignment should
    # be left; it was later agreed that right-aligned makes more sense
    # for numeric types.  See http://bugs.python.org/issue6857.
    format_dict['align'] = align or '>'
    # default sign handling: '-' for negative, '' for positive
    if format_dict['sign'] is None:
        format_dict['sign'] = '-'
    # minimumwidth defaults to 0; precision remains None if not given
    format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
    if format_dict['precision'] is not None:
        format_dict['precision'] = int(format_dict['precision'])
    # if format type is 'g' or 'G' then a precision of 0 makes little
    # sense; convert it to 1.  Same if format type is unspecified.
    if format_dict['precision'] == 0:
        if format_dict['type'] is None or format_dict['type'] in 'gGn':
            format_dict['precision'] = 1
    # determine thousands separator, grouping, and decimal separator, and
    # add appropriate entries to format_dict
    if format_dict['type'] == 'n':
        # apart from separators, 'n' behaves just like 'g'
        format_dict['type'] = 'g'
        if _localeconv is None:
            _localeconv = _locale.localeconv()
        if format_dict['thousands_sep'] is not None:
            raise ValueError("Explicit thousands separator conflicts with "
                             "'n' type in format specifier: " + format_spec)
        format_dict['thousands_sep'] = _localeconv['thousands_sep']
        format_dict['grouping'] = _localeconv['grouping']
        format_dict['decimal_point'] = _localeconv['decimal_point']
    else:
        if format_dict['thousands_sep'] is None:
            format_dict['thousands_sep'] = ''
        format_dict['grouping'] = [3, 0]
        format_dict['decimal_point'] = '.'
    return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
# (2) nonempty list of positive integers + [0]
# (3) list of positive integers + [locale.CHAR_MAX], or
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
    """Insert thousands separators into a digit string.

    spec is a dictionary whose keys should include 'thousands_sep' and
    'grouping'; typically it's the result of parsing the format
    specifier using _parse_format_specifier.

    The min_width keyword argument gives the minimum length of the
    result, which will be padded on the left with zeros if necessary.

    If necessary, the zero padding adds an extra '0' on the left to
    avoid a leading thousands separator.  For example, inserting
    commas every three digits in '123456', with min_width=8, gives
    '0,123,456', even though that has length 9.
    """
    sep = spec['thousands_sep']
    grouping = spec['grouping']
    # Collect groups right-to-left, zero-padding each group as needed,
    # then join them reversed at the end.
    groups = []
    for l in _group_lengths(grouping):
        if l <= 0:
            raise ValueError("group length should be positive")
        # max(..., 1) forces at least 1 digit to the left of a separator
        l = min(max(len(digits), min_width, 1), l)
        groups.append('0'*(l - len(digits)) + digits[-l:])
        digits = digits[:-l]
        min_width -= l
        if not digits and min_width <= 0:
            break
        min_width -= len(sep)
    else:
        # for/else: grouping was exhausted (finite, e.g. CHAR_MAX form)
        # before the digits were -- emit everything left as one group.
        l = max(len(digits), min_width, 1)
        groups.append('0'*(l - len(digits)) + digits[-l:])
    return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
    """Format a number, given the following data:

    is_negative: true if the number is negative, else false
    intpart: string of digits that must appear before the decimal point
    fracpart: string of digits that must come after the point
    exp: exponent, as an integer
    spec: dictionary resulting from parsing the format specifier

    This function uses the information in spec to:
      insert separators (decimal separator and thousands separators)
      format the sign
      format the exponent
      add trailing '%' for the '%' type
      zero-pad if necessary
      fill and align if necessary
    """
    sign = _format_sign(is_negative, spec)
    # 'alt' (the '#' flag) forces a decimal point even with no fraction.
    if fracpart or spec['alt']:
        fracpart = spec['decimal_point'] + fracpart
    if exp != 0 or spec['type'] in 'eE':
        # Exponent letter case follows the format type ('g'/'e' -> 'e').
        echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
        fracpart += "{0}{1:+}".format(echar, exp)
    if spec['type'] == '%':
        fracpart += '%'
    # Zero padding is applied via the min_width of the thousands-sep pass
    # so that separators land correctly inside the padding.
    if spec['zeropad']:
        min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
    else:
        min_width = 0
    intpart = _insert_thousands_sep(intpart, spec, min_width)
    return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################

# Reusable defaults -- shared immutable Decimal instances, so the hot
# paths never rebuild these common values.
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)

# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)

# Constants related to the hash implementation;  hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS, so Decimal hashes agree
# with int/float hashes for equal values.
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan

# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
# sys is no longer needed at runtime; drop it from the module namespace.
del sys
try:
    import _decimal
except ImportError:
    pass
else:
    # An accelerator module is available: remove every name this pure-Python
    # implementation defined that _decimal also provides a replacement for,
    # then shadow the rest with the accelerated versions via star-import.
    s1 = set(dir())
    s2 = set(dir(_decimal))
    for name in s1 - s2:
        del globals()[name]
    # NOTE(review): if s1 - s2 were ever empty, `name` would be unbound and
    # the following del would raise NameError -- confirm this can't happen.
    del s1, s2, name
    from _decimal import *

if __name__ == '__main__':
    # Run the doctest examples embedded in the docstrings above.
    import doctest, decimal
    doctest.testmod(decimal)
| gpl-3.0 |
leotsem/django-ios-notifications | ios_notifications/decorators.py | 1 | 2062 | from django.contrib.auth import authenticate
from ios_notifications.http import JSONResponse
from django.conf import settings
import binascii
class InvalidAuthenticationType(Exception):
    """Raised when the IOS_NOTIFICATIONS_AUTHENTICATION setting is missing
    or not one of the values listed in VALID_AUTH_TYPES."""
    pass
# TODO: OAuth
# Accepted values for settings.IOS_NOTIFICATIONS_AUTHENTICATION
# (checked by api_authentication_required below).
VALID_AUTH_TYPES = ('AuthBasic', 'AuthBasicIsStaff', 'AuthNone')
def api_authentication_required(func):
    """
    Check the value of IOS_NOTIFICATIONS_AUTHENTICATION in settings
    and authenticate the request user appropriately.

    Supported settings values:
      * 'AuthBasic':        HTTP Basic auth against the Django user database.
      * 'AuthBasicIsStaff': as 'AuthBasic', but the user must also be staff.
      * 'AuthNone':         no authentication performed.

    Raises InvalidAuthenticationType when the setting is missing/invalid;
    returns a 401 JSONResponse on absent or bad credentials.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        AUTH_TYPE = getattr(settings, 'IOS_NOTIFICATIONS_AUTHENTICATION', None)
        if AUTH_TYPE is None or AUTH_TYPE not in VALID_AUTH_TYPES:
            raise InvalidAuthenticationType(
                'IOS_NOTIFICATIONS_AUTHENTICATION must be specified in your '
                'settings.py file. Valid options are "AuthBasic", '
                '"AuthBasicIsStaff" or "AuthNone"')
        # Basic Authorization
        elif AUTH_TYPE == 'AuthBasic' or AUTH_TYPE == 'AuthBasicIsStaff':
            if 'HTTP_AUTHORIZATION' in request.META:
                try:
                    # Header format: 'Basic <base64(username:password)>'.
                    # maxsplit=1 plus the except prevent the uncaught
                    # ValueError (-> HTTP 500) the old split(' ') raised on
                    # headers without a space.
                    auth_scheme, encoded_user_password = \
                        request.META['HTTP_AUTHORIZATION'].split(' ', 1)
                except ValueError:
                    return JSONResponse({'error': 'malformed Authorization header'}, status=401)
                if auth_scheme != 'Basic':
                    # Only the Basic scheme is supported here.
                    return JSONResponse({'error': 'malformed Authorization header'}, status=401)
                try:
                    # base64.b64decode replaces the Python-2-only
                    # str.decode('base64'); TypeError covers the py2 error
                    # path, binascii.Error the py3 one.
                    userpass = base64.b64decode(encoded_user_password)
                except (binascii.Error, TypeError):
                    return JSONResponse({'error': 'invalid base64 encoded header'}, status=401)
                try:
                    # maxsplit=1: RFC 7617 allows ':' inside the password.
                    username, password = userpass.decode('utf-8').split(':', 1)
                except (ValueError, UnicodeDecodeError):
                    return JSONResponse({'error': 'malformed Authorization header'}, status=401)
                user = authenticate(username=username, password=password)
                if user is not None:
                    if AUTH_TYPE == 'AuthBasic' or user.is_staff:
                        return func(request, *args, **kwargs)
                return JSONResponse({'error': 'authentication error'}, status=401)
            return JSONResponse({'error': 'Authorization header not set'}, status=401)
        # AuthNone: No authorization.
        return func(request, *args, **kwargs)
    return wrapper
| bsd-3-clause |
Konosh93/locallibrary | locallibrary/settings.py | 1 | 4331 | """
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
'''
##
from unipath import Path
PROJECT_DIR = Path(__file__).parent
STATIC_ROOT = PROJECT_DIR.parent.child('staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
PROJECT_DIR.child('static'),
)
##
'''
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = '70x=f-90^x9n1&4zgvqn)3kb_y+_loktvb#@ez10w5h63tgvf1'
import os
# NOTE(review): the fallback below is a real secret key committed to the
# repo; production must set DJANGO_SECRET_KEY so the fallback is never used
# -- ideally remove the fallback so startup fails loudly without it.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
# bool() over an environment string is True for ANY non-empty value --
# bool('False') == True -- so the old `bool(os.environ.get(...))` could
# never actually disable debug. Compare against the literal 'False'
# instead: export DJANGO_DEBUG=False to turn debug off; any other value
# (or unset) leaves it on, preserving the previous default.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'

# SECURITY WARNING: '*' accepts any Host header (enables host-header
# attacks); restrict to the real host names in production.
ALLOWED_HOSTS = ['*']
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project application, registered through its AppConfig.
    'catalog.apps.CatalogConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'locallibrary.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): relative path, resolved against the process working
        # directory; an absolute path built from BASE_DIR would be more robust.
        'DIRS': ['./templates',],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Hard-coded local development credentials.  On Heroku these values are
# overridden further down by dj_database_url.config() from $DATABASE_URL;
# avoid committing real passwords to source control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'locallibrary',
        'USER': 'omer',
        'PASSWORD': 'mypass',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC' #'JST'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# Where unauthenticated users are redirected, and where to go after login.
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'

# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' | mit |
red2fred2/todo-app | node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
  """Old-style comparison callable ordering (name, value) tuples by name."""
  def __call__(self, x, y):
    # cmp() no longer exists on Python 3; this expression is its exact
    # equivalent: -1, 0 or 1 depending on how the first items compare.
    return (x[0] > y[0]) - (x[0] < y[0])
class CmpNode(object):
  """Compare function between 2 xml nodes.

  Nodes are ordered by a flattened string key built from the node name,
  node value, the "Name" attribute and the remaining attributes sorted
  by attribute name.
  """
  def __call__(self, x, y):
    def get_string(node):
      """Build the canonical sort key for one node."""
      node_string = "node"
      node_string += node.nodeName
      if node.nodeValue:
        node_string += node.nodeValue

      if node.attributes:
        # We first sort by name, if present.
        node_string += node.getAttribute("Name")

        all_nodes = []
        for (name, value) in node.attributes.items():
          all_nodes.append((name, value))

        # Attribute names are unique within an element, so the name alone
        # is a total order.  A key function replaces the Python 2-only
        # cmp-object sort (all_nodes.sort(CmpTuple())).
        all_nodes.sort(key=lambda pair: pair[0])
        for (name, value) in all_nodes:
          node_string += name
          node_string += value

      return node_string

    # Equivalent of the removed cmp() builtin: -1, 0 or 1.
    key_x = get_string(x)
    key_y = get_string(y)
    return (key_x > key_y) - (key_x < key_y)
def PrettyPrintNode(node, indent=0):
  """Recursively pretty-print `node` and its subtree to stdout.

  Text nodes are stripped, attributes are printed one per line sorted by
  name, and children are indented by two extra spaces per level.
  """
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      # print(single_expression) behaves identically under Python 2
      # and 3; the old statement form was Python 2-only.
      print('%s%s' % (' '*indent, node.data.strip()))
    return

  if node.childNodes:
    # Merge adjacent text nodes so each prints as one line.
    node.normalize()

  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length

  # Print the main tag
  if attr_count == 0:
    print('%s<%s>' % (' '*indent, node.nodeName))
  else:
    print('%s<%s' % (' '*indent, node.nodeName))

    # Attribute names are unique per element, so a plain name key yields
    # the same order as the old Python 2-only sort(CmpTuple()) call.
    all_attributes = sorted(node.attributes.items(), key=lambda pair: pair[0])
    for (name, value) in all_attributes:
      print('%s %s="%s"' % (' '*indent, name, value))
    print('%s>' % (' '*indent))
  if node.nodeValue:
    print('%s %s' % (' '*indent, node.nodeValue))

  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print('%s</%s>' % (' '*indent, node.nodeName))
def FlattenFilter(node):
  """Returns a list of all the node and sub nodes."""
  # The special "_excluded_files" filter is dropped entirely.
  if node.attributes and node.getAttribute('Name') == '_excluded_files':
    return []

  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      # Nested filters are dissolved into the flat list.
      flattened.extend(FlattenFilter(child))
    else:
      flattened.append(child)
  return flattened
def FixFilenames(filenames, current_directory):
  """Return `filenames` with REPLACEMENTS applied and paths made absolute.

  Entries starting with '$' (Visual Studio macros) are kept verbatim.
  NOTE(review): os.chdir() is called for every filename so that
  os.path.abspath() resolves relative paths against `current_directory`;
  this mutates process-wide state as a side effect.
  """
  new_list = []
  for filename in filenames:
    if filename:
      # Apply the key=value substitutions collected from the command line.
      for key in REPLACEMENTS:
        filename = filename.replace(key, REPLACEMENTS[key])
      os.chdir(current_directory)
      filename = filename.strip('"\' ')
      if filename.startswith('$'):
        # Leave environment/macro references untouched.
        new_list.append(filename)
      else:
        new_list.append(os.path.abspath(filename))
  return new_list
def AbsoluteNode(node):
  """Makes all the properties we know about in this node absolute.

  Path-valued attributes are split on ';', run through FixFilenames()
  (absolutized relative to the directory of the input vcproj,
  ARGUMENTS[1]) and re-joined.  Empty attributes are removed.
  """
  if node.attributes:
    for (name, value) in node.attributes.items():
      if name in ['InheritedPropertySheets', 'RelativePath',
                  'AdditionalIncludeDirectories',
                  'IntermediateDirectory', 'OutputDirectory',
                  'AdditionalLibraryDirectories']:
        # We want to fix up these paths
        path_list = value.split(';')
        new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
        node.setAttribute(name, ';'.join(new_list))
      if not value:
        # Empty attributes carry no information; drop them.
        node.removeAttribute(name)
def CleanupVcproj(node):
  """Recursively normalize a vcproj DOM subtree in place.

  Children are absolutized and cleaned first, stray whitespace text is
  removed, semicolon-separated attribute values are sorted and
  de-duplicated, filters are flattened, children are re-inserted in a
  canonical sorted order, and empty Tool / UserMacro nodes are dropped.
  """
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)

  # Normalize the node, and remove all extranous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()

  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)

  if node.childNodes:
    node.normalize()

  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)

    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)

  # Sort the list.
  # NOTE(review): Python 2-only cmp-object sort; under Python 3 this
  # would need key=functools.cmp_to_key(CmpNode()).
  node_array.sort(CmpNode())

  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # Only the Name attribute is present: the tool carries no settings.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
  """Return every <Configuration> element under <Configurations>."""
  # TODO(nsylvain): Find a better way to navigate the xml.
  return [
      config
      for container in vcproj.childNodes
      if container.nodeName == "Configurations"
      for config in container.childNodes
      if config.nodeName == "Configuration"
  ]
def GetChildrenVsprops(filename):
  """Return the absolute paths of the vsprops inherited by `filename`.

  Parses the .vsprops file and expands its InheritedPropertySheets
  attribute; returns an empty list when there is nothing to inherit.
  """
  dom = parse(filename)
  if dom.documentElement.attributes:
    vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
    return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
  return []
def SeekToNode(node1, child2):
  """Find the child of node1 matching child2's tag and Name attribute.

  Returns None when child2 is a text node, is unnamed, or has no
  counterpart under node1.
  """
  # Text nodes carry no attributes, so they can never be matched by name.
  if child2.nodeType == Node.TEXT_NODE:
    return None

  target_name = child2.getAttribute("Name")
  if not target_name:
    # Without a Name we have no merge key; give up.
    return None

  # First child with both the same tag and the same Name wins.  The
  # nodeName test runs first, so getAttribute is only reached for
  # element nodes.
  matches = (candidate for candidate in node1.childNodes
             if candidate.nodeName == child2.nodeName
             and candidate.getAttribute("Name") == target_name)
  return next(matches, None)
def MergeAttributes(node1, node2):
  """Fold node2's attributes into node1.

  'Name' is never merged; values that differ are concatenated with ';';
  'InheritedPropertySheets' is removed afterwards since it has already
  been expanded.
  """
  if not node2.attributes:
    # Nothing to merge.
    return

  for name, incoming in node2.attributes.items():
    # The 'Name' attribute identifies the node and must stay as-is.
    if name == 'Name':
      continue

    existing = node1.getAttribute(name)
    if not existing:
      # New attribute on the main node: copy it over verbatim.
      node1.setAttribute(name, incoming)
    elif existing != incoming:
      # Both nodes define it with different values: concatenate them.
      node1.setAttribute(name, ';'.join([existing, incoming]))

    # Property sheet references are useless once merged; drop them.
    if name == 'InheritedPropertySheets':
      node1.removeAttribute(name)
def MergeProperties(node1, node2):
  """Recursively merge node2 (a property sheet DOM) into node1.

  Attributes are merged first; children of node2 that have a named
  counterpart under node1 are merged in place, the others are appended
  as deep copies.
  """
  MergeAttributes(node1, node2)
  for child2 in node2.childNodes:
    child1 = SeekToNode(node1, child2)
    if child1:
      MergeProperties(child1, child2)
    else:
      # No matching child: import the whole subtree.
      node1.appendChild(child2.cloneNode(True))
def main(argv):
  """Main function of this vcproj prettifier.

  argv: [prog, vcproj_path, key1=value1, ...].  Stores argv in the
  ARGUMENTS global (read by AbsoluteNode) and the key=value pairs in
  REPLACEMENTS (read by FixFilenames).  Returns 0 on success, 1 on
  usage error.
  """
  global ARGUMENTS
  ARGUMENTS = argv

  # check if we have exactly 1 parameter.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1

  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value

  # Open the vcproj and parse the xml.
  dom = parse(argv[1])

  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')

    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))

    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.  NOTE: vsprops_list is deliberately extended while being
    # iterated, so transitively included sheets are visited too.
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))

    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)

  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)

  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0
if __name__ == '__main__':
  # Exit with main()'s return code (0 on success, 1 on usage error).
  sys.exit(main(sys.argv))
| mpl-2.0 |
darkleons/odoo | addons/website_sale_delivery/models/sale_order.py | 48 | 6001 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
from openerp.addons import decimal_precision
class delivery_carrier(orm.Model):
    """Extend delivery.carrier with website (eCommerce) publication fields."""
    _inherit = 'delivery.carrier'

    _columns = {
        # Controls whether this carrier is offered in the website checkout
        # (see SaleOrder._get_delivery_methods below).
        'website_published': fields.boolean('Available in the website', copy=False),
        'website_description': fields.text('Description for the website'),
    }
    _defaults = {
        'website_published': True
    }
class SaleOrder(orm.Model):
    """Extend sale.order with delivery-amount and website-delivery logic."""
    _inherit = 'sale.order'

    def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
        """ Wrapper because of direct method passing as parameter for function fields """
        return self._amount_all(cr, uid, ids, field_name, arg, context=context)

    def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
        # Adds 'amount_delivery' (the rounded sum of delivery lines) on top
        # of the amounts computed by the parent implementation.
        res = super(SaleOrder, self)._amount_all(cr, uid, ids, field_name, arg, context=context)
        currency_pool = self.pool.get('res.currency')
        for order in self.browse(cr, uid, ids, context=context):
            line_amount = sum([line.price_subtotal for line in order.order_line if line.is_delivery])
            currency = order.pricelist_id.currency_id
            res[order.id]['amount_delivery'] = currency_pool.round(cr, uid, currency, line_amount)
        return res

    def _get_order(self, cr, uid, ids, context=None):
        # Store trigger: map changed sale.order.line ids to their order ids.
        result = {}
        for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
            result[line.order_id.id] = True
        return result.keys()

    _columns = {
        'amount_delivery': fields.function(
            _amount_all_wrapper, type='float', digits_compute=decimal_precision.get_precision('Account'),
            string='Delivery Amount',
            store={
                'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
                'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
            },
            multi='sums', help="The amount without tax.", track_visibility='always'
        ),
        'website_order_line': fields.one2many(
            'sale.order.line', 'order_id',
            string='Order Lines displayed on Website', readonly=True,
            domain=[('is_delivery', '=', False)],
            help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
        ),
    }

    def _check_carrier_quotation(self, cr, uid, order, force_carrier_id=None, context=None):
        # Pick (or validate) a carrier for the order and apply/remove the
        # delivery line accordingly.  Returns True/False depending on
        # whether a usable carrier ended up set.
        carrier_obj = self.pool.get('delivery.carrier')

        # check to add or remove carrier_id
        if not order:
            return False
        if all(line.product_id.type == "service" for line in order.website_order_line):
            # Service-only orders never need shipping.
            order.write({'carrier_id': None}, context=context)
            self.pool['sale.order']._delivery_unset(cr, SUPERUSER_ID, [order.id], context=context)
            return True
        else:
            carrier_id = force_carrier_id or order.carrier_id.id
            carrier_ids = self._get_delivery_methods(cr, uid, order, context=context)
            if carrier_id:
                if carrier_id not in carrier_ids:
                    carrier_id = False
                else:
                    # Try the currently selected carrier first.
                    carrier_ids.remove(carrier_id)
                    carrier_ids.insert(0, carrier_id)
            if force_carrier_id or not carrier_id or not carrier_id in carrier_ids:
                for delivery_id in carrier_ids:
                    grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
                    if grid_id:
                        carrier_id = delivery_id
                        break
            order.write({'carrier_id': carrier_id}, context=context)
            if carrier_id:
                order.delivery_set(context=context)
            else:
                order._delivery_unset(context=context)

        return bool(carrier_id)

    def _get_delivery_methods(self, cr, uid, order, context=None):
        # Published carriers that actually have a price grid for the
        # order's shipping address.
        carrier_obj = self.pool.get('delivery.carrier')
        delivery_ids = carrier_obj.search(cr, uid, [('website_published','=',True)], context=context)
        # Following loop is done to avoid displaying delivery methods who are not available for this order
        # This can surely be done in a more efficient way, but at the moment, it mimics the way it's
        # done in delivery_set method of sale.py, from delivery module
        for delivery_id in list(delivery_ids):
            grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
            if not grid_id:
                delivery_ids.remove(delivery_id)
        return delivery_ids

    def _get_errors(self, cr, uid, order, context=None):
        # Checkout validation hook: block orders with no deliverable method.
        errors = super(SaleOrder, self)._get_errors(cr, uid, order, context=context)
        if not self._get_delivery_methods(cr, uid, order, context=context):
            errors.append(('No delivery method available', 'There is no available delivery method for your order'))
        return errors

    def _get_website_data(self, cr, uid, order, context=None):
        """ Override to add delivery-related website data. """
        values = super(SaleOrder, self)._get_website_data(cr, uid, order, context=context)
        # We need a delivery only if we have stockable products
        has_stockable_products = False
        for line in order.order_line:
            if line.product_id.type in ('consu', 'product'):
                has_stockable_products = True
        if not has_stockable_products:
            return values

        delivery_ctx = dict(context, order_id=order.id)
        DeliveryCarrier = self.pool.get('delivery.carrier')
        delivery_ids = self._get_delivery_methods(cr, uid, order, context=context)
        values['deliveries'] = DeliveryCarrier.browse(cr, SUPERUSER_ID, delivery_ids, context=delivery_ctx)
        return values
| agpl-3.0 |
quasipedia/googios | setup.py | 1 | 2724 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os.path as path
from setuptools import setup, find_packages
long_description = '''
GooGios
=======
GooGios allows to use Google apps (calendar, contacts) to manage a roster of
on-call people. The script keeps in sync a local cache of the on-call shifts
and it can be queried to output information like who is the current person on
duty, what's their telephone number and email address, as well as getting the
complete roster between date X and Y.
Licensed under GPLv3+.
Documentation can be found on the public `repository on GitHub`_:
.. _repository on GitHub: https://github.com/quasipedia/googios
'''
here = path.abspath(path.dirname(__file__))

# The single source of truth for the version is the VERSION file next to
# this setup.py.
with open(path.join(here, 'VERSION')) as f:
    version = f.read().strip()

setup(
    # Project details
    name='googios',
    version=version,
    description='Manage your Nagios on-call roster with Google apps',
    long_description=long_description,
    url='https://github.com/quasipedia/googios',

    # Author details
    author='Mac Ryan',
    author_email='quasipedia@gmail.com',
    license='GPLv3+',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',  # 5 - Production/Stable
        'Environment :: Console',
        'License :: OSI Approved :: GNU General Public License v3 or later',
        'Intended Audience :: Developers',
        'Topic :: System :: Monitoring',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        # 'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.2',
        # 'Programming Language :: Python :: 3.3',
        # 'Programming Language :: Python :: 3.4',
    ],
    keywords='nagios google google-apps system-monitoring uptime alarms',
    packages=find_packages(),
    # Pinned runtime dependencies (installed by pip alongside the package).
    install_requires=[
        'docopt==0.6.2',
        'google-api-python-client==1.3.1',
        'gdata==2.0.18',
        'pycrypto==2.6.1',
        'unicodecsv==0.9.4',
        'pytz==2014.10',
        'python-dateutil==2.3',
        'clint==0.4.1',
    ],
    extras_require={
        'dev': ['wheel>=0.24.0'],
    },
    # package_data={
    #     'googios': ['setup-questions.json'],
    # },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],
    entry_points={
        'console_scripts': [
            'googios=googios.googios:main',
        ],
    },
)
| gpl-3.0 |
NoelMacwan/android_kernel_sony_msm8226 | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Map of drop location (stringified kernel address) -> number of drops seen.
drop_log = {}
# List of {'loc': <int address>, 'name': <symbol>} dicts from /proc/kallsyms,
# sorted by address once get_kallsyms_table() has run.
kallsyms = []
def get_kallsyms_table():
    """Load /proc/kallsyms into the global `kallsyms` list.

    Each entry is a dict {'loc': <int address>, 'name': <symbol>}.
    Progress is printed every 100 symbols.  Silently returns if
    /proc/kallsyms cannot be opened (e.g. not running on Linux).
    """
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
        # First pass only counts lines so progress can be reported.
        linecount = 0
        for line in f:
            linecount = linecount+1
        f.seek(0)
    except:
        return
    j = 0
    for line in f:
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        j = j +1
        if ((j % 100) == 0):
            print "\r" + str(j) + "/" + str(linecount),
        kallsyms.append({ 'loc': loc, 'name' : name})
    print "\r" + str(j) + "/" + str(linecount)
    # Python 2 compares same-keyed dicts by their values in sorted key
    # order, so this effectively sorts by 'loc' -- get_sym relies on the
    # list being ordered by address.
    kallsyms.sort()
    return
def get_sym(sloc):
    """Map a kernel address to (symbol_name, offset_into_symbol).

    `kallsyms` must already be populated (sorted by address) via
    get_kallsyms_table().  Returns (None, 0) when the address falls
    before the first known symbol.
    """
    loc = int(sloc)
    # A drop location lies *inside* a function, so the containing symbol
    # is the last one whose start address is <= loc.  The previous code
    # returned the first symbol at or *after* loc, attributing the drop
    # to the wrong function with a meaningless offset.
    containing = None
    for entry in kallsyms:
        if entry['loc'] > loc:
            break
        containing = entry
    if containing is None:
        return (None, 0)
    return (containing['name'], loc - containing['loc'])
def print_drop_table():
    """Print one line per drop location: symbol, offset and hit count."""
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            # Address not resolvable via kallsyms; show the raw address.
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    """perf-script hook: called once before event processing starts."""
    # Single-argument print() behaves identically under Python 2 and 3;
    # the old statement form was Python 2-only.
    print("Starting trace (Ctrl-C to dump results)")
def trace_end():
    """perf-script hook: called once at exit; resolves symbols and reports."""
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """perf-script hook for the skb:kfree_skb tracepoint.

    Counts one packet drop against its kernel code location in the
    global `drop_log` dict.
    """
    slocation = str(location)
    # dict.get() replaces the previous bare `except:` increment, which
    # silently swallowed *every* exception (even KeyboardInterrupt).
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
craftytrickster/servo | tests/wpt/harness/wptrunner/executors/executorservodriver.py | 40 | 8852 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import socket
import threading
import time
import traceback
from .base import (Protocol,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
strip_server)
from .. import webdriver
from ..testrunner import Stop
webdriver = None
here = os.path.join(os.path.split(__file__)[0])
extra_timeout = 5
def do_delayed_imports():
    """Import the webdriver module lazily, rebinding the module-level
    `webdriver` name (initialised to None above) on first protocol use."""
    global webdriver
    import webdriver
class ServoWebDriverProtocol(Protocol):
    """Protocol implementation talking to Servo over its WebDriver server."""

    def __init__(self, executor, browser, capabilities, **kwargs):
        do_delayed_imports()
        Protocol.__init__(self, executor, browser)
        self.capabilities = capabilities
        self.host = browser.webdriver_host
        self.port = browser.webdriver_port
        self.session = None

    def setup(self, runner):
        """Connect to browser via WebDriver."""
        self.runner = runner

        url = "http://%s:%d" % (self.host, self.port)
        session_started = False
        try:
            self.session = webdriver.Session(self.host, self.port,
                extension=webdriver.servo.ServoCommandExtensions)
            self.session.start()
        # NOTE(review): bare except deliberately reports any startup
        # failure as init_failed rather than crashing the runner.
        except:
            self.logger.warning(
                "Connecting with WebDriver failed:\n%s" % traceback.format_exc())
        else:
            self.logger.debug("session started")
            session_started = True

        if not session_started:
            self.logger.warning("Failed to connect via WebDriver")
            self.executor.runner.send_message("init_failed")
        else:
            self.executor.runner.send_message("init_succeeded")

    def teardown(self):
        """Close the WebDriver session, ignoring errors on a dead session."""
        self.logger.debug("Hanging up on WebDriver session")
        try:
            self.session.end()
        except:
            pass

    def is_alive(self):
        try:
            # Get a simple property over the connection
            self.session.window_handle
            # TODO what exception?
        except Exception:
            return False
        return True

    def after_connect(self):
        pass

    def wait(self):
        # Block until the connection drops or an unexpected error occurs;
        # script timeouts are expected and simply retried.
        while True:
            try:
                self.session.execute_async_script("")
            except webdriver.TimeoutException:
                pass
            except (socket.timeout, IOError):
                break
            except Exception as e:
                self.logger.error(traceback.format_exc(e))
                break

    def on_environment_change(self, old_environment, new_environment):
        #Unset all the old prefs
        self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
        self.session.extension.set_prefs(new_environment.get("prefs", {}))
class ServoWebDriverRun(object):
    """Run `func(session, url, timeout)` on a worker thread with a deadline.

    run() returns (success, data): (True, result) on success, otherwise
    (False, (status, detail)).  `current_timeout` is accepted for API
    compatibility but unused.
    """

    def __init__(self, func, session, url, timeout, current_timeout=None):
        self.func = func
        self.result = None
        self.session = session
        self.url = url
        self.timeout = timeout
        self.result_flag = threading.Event()

    def run(self):
        executor = threading.Thread(target=self._run)
        executor.start()

        # Give the worker slightly longer than the test timeout itself.
        flag = self.result_flag.wait(self.timeout + extra_timeout)
        if self.result is None:
            assert not flag
            self.result = False, ("EXTERNAL-TIMEOUT", None)

        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.session, self.url, self.timeout)
        except webdriver.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, IOError):
            self.result = False, ("CRASH", None)
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # NOTE(review): traceback.format_exc() takes a `limit`, not the
            # exception; passing `e` works only by accident on Python 2.
            message += traceback.format_exc(e)
            self.result = False, ("ERROR", e)
        finally:
            self.result_flag.set()
def timeout_func(timeout):
    """Build a zero-argument "has the deadline passed?" predicate.

    With a falsy timeout the predicate always answers False; otherwise it
    compares elapsed wall-clock time against timeout + extra_timeout.
    """
    if not timeout:
        return lambda: False

    started = time.time()

    def expired():
        return time.time() - started > timeout + extra_timeout

    return expired
class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
    """Run testharness.js tests in Servo via WebDriver async script injection."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 close_after_done=True, capabilities=None, debug_info=None):
        # NOTE(review): the ctor's timeout_multiplier and debug_info
        # arguments are not forwarded -- literal 1 / None are passed to
        # the base class instead.  Looks like a bug; confirm upstream.
        TestharnessExecutor.__init__(self, browser, server_config, timeout_multiplier=1,
                                     debug_info=None)
        self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
        with open(os.path.join(here, "testharness_servodriver.js")) as f:
            self.script = f.read()
        # Last script timeout pushed to the session; avoids resetting it
        # for every test.
        self.timeout = None

    def on_protocol_change(self, new_protocol):
        pass

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        url = self.test_url(test)

        timeout = test.timeout * self.timeout_multiplier + extra_timeout

        if timeout != self.timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        success, data = ServoWebDriverRun(self.do_testharness,
                                          self.protocol.session,
                                          url,
                                          timeout).run()

        if success:
            return self.convert_result(test, data)

        return (test.result_cls(*data), [])

    def do_testharness(self, session, url, timeout):
        # Navigate, inject the harness glue script and parse its JSON result.
        session.url = url
        result = json.loads(
            session.execute_async_script(
                self.script % {"abs_url": url,
                               "url": strip_server(url),
                               "timeout_multiplier": self.timeout_multiplier,
                               "timeout": timeout * 1000}))
        # Prevent leaking every page in history until Servo develops a more sane
        # page cache
        session.back()
        return result
class TimeoutError(Exception):
    """Raised when a WebDriver operation exceeds its deadline.

    NOTE(review): shadows the Python 3 builtin TimeoutError; the reftest
    executor below catches this class explicitly.
    """
    pass
class ServoWebDriverRefTestExecutor(RefTestExecutor):
    """Run reftests in Servo via WebDriver, comparing screenshots."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, capabilities=None, debug_info=None):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = ServoWebDriverProtocol(self, browser,
                                               capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        # Last script timeout pushed to the session (see screenshot()).
        self.timeout = None
        with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        # Map the failure modes onto wptrunner result statuses.
        try:
            result = self.implementation.run_test(test)
            return self.convert_result(test, result)
        except IOError:
            return test.result_cls("CRASH", None), []
        except TimeoutError:
            return test.result_cls("TIMEOUT", None), []
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            message += traceback.format_exc(e)
            return test.result_cls("ERROR", message), []

    def screenshot(self, test, viewport_size, dpi):
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None

        # No deadline when running under a debugger.
        timeout = (test.timeout * self.timeout_multiplier + extra_timeout
                   if self.debug_info is None else None)

        if self.timeout != timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        return ServoWebDriverRun(self._screenshot,
                                 self.protocol.session,
                                 self.test_url(test),
                                 timeout).run()

    def _screenshot(self, session, url, timeout):
        # Navigate, wait for reftest-wait to clear, then capture the page.
        session.url = url
        session.execute_async_script(self.wait_script)
        return session.screenshot()
| mpl-2.0 |
phlax/translate | translate/tools/test_pypo2phppo.py | 4 | 1706 | # -*- coding: utf-8 -*-
# pypo2phppo unit tests
# Author: Wil Clouser <wclouser@mozilla.com>
# Date: 2009-12-03
from io import BytesIO
from translate.convert import test_convert
from translate.tools import pypo2phppo
class TestPyPo2PhpPo:
    """Conversion tests: Python brace placeholders ({0}, {1}) must become
    PHP-style positional specifiers (%1$s, %2$s) in comments and in
    msgid/msgstr, for both singular and plural entries."""

    def test_single_po(self):
        inputfile = b"""
# This user comment refers to: {0}
#. This developer comment does too: {0}
#: some/path.php:111
#, php-format
msgid "I have {1} apples and {0} oranges"
msgstr "I have {1} apples and {0} oranges"
"""
        outputfile = BytesIO()
        pypo2phppo.convertpy2php(inputfile, outputfile)
        output = outputfile.getvalue().decode('utf-8')
        # {0} is zero-based, %1$s is one-based, hence the +1 shift.
        assert "refers to: %1$s" in output
        assert "does too: %1$s" in output
        assert 'msgid "I have %2$s apples and %1$s oranges"' in output
        assert 'msgstr "I have %2$s apples and %1$s oranges"' in output

    def test_plural_po(self):
        inputfile = b"""
#. This developer comment refers to {0}
#: some/path.php:111
#, php-format
msgid "I have {0} apple"
msgid_plural "I have {0} apples"
msgstr[0] "I have {0} apple"
msgstr[1] "I have {0} apples"
"""
        outputfile = BytesIO()
        pypo2phppo.convertpy2php(inputfile, outputfile)
        output = outputfile.getvalue().decode('utf-8')
        assert 'msgid "I have %1$s apple"' in output
        assert 'msgid_plural "I have %1$s apples"' in output
        assert 'msgstr[0] "I have %1$s apple"' in output
        assert 'msgstr[1] "I have %1$s apples"' in output
class TestPyPo2PhpPoCommand(test_convert.TestConvertCommand, TestPyPo2PhpPo):
    """Tests running actual pypo2phppo commands on files.

    Inherits the conversion assertions above plus the generic
    command-line harness from TestConvertCommand."""
    convertmodule = pypo2phppo
    defaultoptions = {}
| gpl-2.0 |
MobinRanjbar/hue | apps/search/src/search/management/commands/search_setup.py | 9 | 1378 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.core import management
from django.core.management.base import NoArgsCommand
from desktop.models import Document, Document2, SAMPLE_USER_OWNERS
from useradmin.models import install_sample_user
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
    """Install the Hue sample search dashboards (idempotent)."""

    def handle_noargs(self, **options):
        # Only load the fixtures if no sample-owned search dashboard exists yet.
        if not Document2.objects.filter(type='search-dashboard', owner__username__in=SAMPLE_USER_OWNERS).exists():
            install_sample_user()
            management.call_command('loaddata', 'initial_search_examples.json', verbosity=2)
            # Keep the legacy Document index in sync with the new documents.
            Document.objects.sync()
| apache-2.0 |
lunafeng/django | tests/template_tests/syntax_tests/test_named_endblock.py | 521 | 2312 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class NamedEndblockTests(SimpleTestCase):
    """Template-syntax tests for {% endblock name %}: properly nested named
    endblocks render, mismatched or crossed names raise
    TemplateSyntaxError, and named/unnamed endblocks may be mixed."""

    @setup({'namedendblocks01': '1{% block first %}_{% block second %}'
                                '2{% endblock second %}_{% endblock first %}3'})
    def test_namedendblocks01(self):
        output = self.engine.render_to_string('namedendblocks01')
        self.assertEqual(output, '1_2_3')

    # Unbalanced blocks
    @setup({'namedendblocks02': '1{% block first %}_{% block second %}'
                                '2{% endblock first %}_{% endblock second %}3'})
    def test_namedendblocks02(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('namedendblocks02')

    @setup({'namedendblocks03': '1{% block first %}_{% block second %}'
                                '2{% endblock %}_{% endblock second %}3'})
    def test_namedendblocks03(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('namedendblocks03')

    @setup({'namedendblocks04': '1{% block first %}_{% block second %}'
                                '2{% endblock second %}_{% endblock third %}3'})
    def test_namedendblocks04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('namedendblocks04')

    @setup({'namedendblocks05': '1{% block first %}_{% block second %}2{% endblock first %}'})
    def test_namedendblocks05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('namedendblocks05')

    # Mixed named and unnamed endblocks
    @setup({'namedendblocks06': '1{% block first %}_{% block second %}'
                                '2{% endblock %}_{% endblock first %}3'})
    def test_namedendblocks06(self):
        """
        Mixed named and unnamed endblocks
        """
        output = self.engine.render_to_string('namedendblocks06')
        self.assertEqual(output, '1_2_3')

    @setup({'namedendblocks07': '1{% block first %}_{% block second %}'
                                '2{% endblock second %}_{% endblock %}3'})
    def test_namedendblocks07(self):
        output = self.engine.render_to_string('namedendblocks07')
        self.assertEqual(output, '1_2_3')
| bsd-3-clause |
BT-fgarbely/account-invoicing | account_invoice_shipping_address/__openerp__.py | 10 | 1559 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2013 Andrea Cometa Perito Informatico (www.andreacometa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Invoice Shipping Address",
'summary': """
Adds a shipping address field to the invoice.""",
"version": "0.1.1",
'category': 'Generic Modules/Accounting',
"depends": ["account", "sale", "sale_stock"],
"author": "Andrea Cometa, Agile Business Group,"
"Odoo Community Association (OCA)",
'website': 'http://www.andreacometa.it',
'license': 'AGPL-3',
'data': [
'invoice_view.xml',
],
'installable': True,
}
| agpl-3.0 |
EdLeafe/pyrax | samples/cloud_blockstorage/create_volume.py | 13 | 1259 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function

import os

import pyrax

# Sample script: create one standard (SATA) and one SSD Cloud Block
# Storage volume, then print them. Credentials are read from the user's
# standard pyrax credential file.
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
cbs = pyrax.cloud_blockstorage

# NOTE: the original sample generated a random name here
# (pyrax.utils.random_ascii) but never used it; the dead assignment has
# been removed.
sata_vol = cbs.create(name="my_standard_volume", size=500, volume_type="SATA")
ssd_vol = cbs.create(name="my_fast_volume", size=500, volume_type="SSD")

print("SATA:", sata_vol)
print()
print("SSD:", ssd_vol)
print()
print("To delete these volumes, run 'delete_volume.py'")
print()
| apache-2.0 |
campbe13/openhatch | vendor/packages/Django/django/test/signals.py | 222 | 2719 | import os
import time
from django.conf import settings
from django.db import connections
from django.dispatch import receiver, Signal
from django.utils import timezone
from django.utils.functional import empty
template_rendered = Signal(providing_args=["template", "context"])
setting_changed = Signal(providing_args=["setting", "value"])
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
    """Keep the process and database time zones in sync when the
    TIME_ZONE or USE_TZ settings are overridden in tests."""
    setting = kwargs['setting']

    if setting == 'TIME_ZONE':
        # Reset the process time zone where the platform supports it.
        if hasattr(time, 'tzset'):
            if kwargs['value']:
                os.environ['TZ'] = kwargs['value']
            else:
                os.environ.pop('TZ', None)
            time.tzset()
        # Invalidate the cached local time zone.
        timezone._localtime = None

    # Decide whether the database connections are affected at all.
    if setting == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
        use_tz, time_zone = kwargs['value'], settings.TIME_ZONE
    elif setting == 'TIME_ZONE' and not settings.USE_TZ:
        use_tz, time_zone = settings.USE_TZ, kwargs['value']
    else:
        # No change relevant to the connections' time zones.
        return

    new_tz = 'UTC' if use_tz else time_zone
    for conn in connections.all():
        conn.settings_dict['TIME_ZONE'] = new_tz
        tz_sql = conn.ops.set_time_zone_sql()
        if tz_sql:
            conn.cursor().execute(tz_sql, [new_tz])
@receiver(setting_changed)
def clear_context_processors_cache(**kwargs):
    """Drop the cached context processors when the setting is overridden."""
    if kwargs['setting'] != 'TEMPLATE_CONTEXT_PROCESSORS':
        return
    from django.template import context
    context._standard_context_processors = None
@receiver(setting_changed)
def clear_template_loaders_cache(**kwargs):
    """Drop the cached template loaders when the setting is overridden."""
    if kwargs['setting'] != 'TEMPLATE_LOADERS':
        return
    from django.template import loader
    loader.template_source_loaders = None
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
    """Drop the registered serializers cache when the setting changes."""
    if kwargs['setting'] != 'SERIALIZATION_MODULES':
        return
    from django.core import serializers
    serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
    """Reset translation caches when locale-related settings change."""
    setting = kwargs['setting']
    if setting not in ('LOCALE_PATHS', 'LANGUAGE_CODE'):
        return
    from django.utils.translation import trans_real
    trans_real._default = None
    if setting == 'LOCALE_PATHS':
        # Changing the search path invalidates every loaded catalog.
        trans_real._translations = {}
@receiver(setting_changed)
def file_storage_changed(**kwargs):
    """Force the default storage to be re-created on next access."""
    if kwargs['setting'] not in ('MEDIA_ROOT', 'DEFAULT_FILE_STORAGE'):
        return
    from django.core.files.storage import default_storage
    default_storage._wrapped = empty
| agpl-3.0 |
porcobosso/spark-ec2 | lib/boto-2.34.0/boto/glacier/layer2.py | 153 | 3745 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.glacier.layer1 import Layer1
from boto.glacier.vault import Vault
class Layer2(object):
    """
    Provides a more pythonic and friendly interface to Glacier based on Layer1
    """

    def __init__(self, *args, **kwargs):
        """All arguments are forwarded to the Layer1 constructor, except
        for an optional ``layer1`` keyword which supplies a pre-built
        Layer1 instance (mainly to allow easier testing)."""
        if "layer1" in kwargs:
            self.layer1 = kwargs["layer1"]
        else:
            self.layer1 = Layer1(*args, **kwargs)

    def create_vault(self, name):
        """Creates a vault.

        :type name: str
        :param name: The name of the vault

        :rtype: :class:`boto.glacier.vault.Vault`
        :return: A Vault object representing the vault.
        """
        self.layer1.create_vault(name)
        return self.get_vault(name)

    def delete_vault(self, name):
        """Delete a vault.

        This operation deletes a vault. Amazon Glacier will delete a
        vault only if there are no archives in the vault as per the
        last inventory and there have been no writes to the vault
        since the last inventory. If either of these conditions is not
        satisfied, the vault deletion fails (that is, the vault is not
        removed) and Amazon Glacier returns an error.

        This operation is idempotent, you can send the same request
        multiple times and it has no further effect after the first
        time Amazon Glacier delete the specified vault.

        :type name: str
        :param name: The name of the vault to delete.
        """
        # BUG FIX (doc): the docstring previously documented a
        # ``vault_name`` parameter; the actual parameter is ``name``.
        return self.layer1.delete_vault(name)

    def get_vault(self, name):
        """
        Get an object representing a named vault from Glacier. This
        operation does not check if the vault actually exists.

        :type name: str
        :param name: The name of the vault

        :rtype: :class:`boto.glacier.vault.Vault`
        :return: A Vault object representing the vault.
        """
        response_data = self.layer1.describe_vault(name)
        return Vault(self.layer1, response_data)

    def list_vaults(self):
        """
        Return a list of all vaults associated with the account ID.

        :rtype: List of :class:`boto.glacier.vault.Vault`
        :return: A list of Vault objects.
        """
        vaults = []
        marker = None
        # Page through the vault listing; Glacier returns at most 1000
        # vaults per call and signals more pages via 'Marker'.
        while True:
            response_data = self.layer1.list_vaults(marker=marker, limit=1000)
            vaults.extend([Vault(self.layer1, rd) for rd in response_data['VaultList']])
            marker = response_data.get('Marker')
            if not marker:
                break
        return vaults
| apache-2.0 |
helldorado/ansible | test/units/modules/network/f5/test_bigip_vlan.py | 16 | 10690 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_vlan import ApiParameters
from library.modules.bigip_vlan import ModuleParameters
from library.modules.bigip_vlan import ModuleManager
from library.modules.bigip_vlan import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_vlan import ApiParameters
from ansible.modules.network.f5.bigip_vlan import ModuleParameters
from ansible.modules.network.f5.bigip_vlan import ModuleManager
from ansible.modules.network.f5.bigip_vlan import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding the JSON/text fixtures used by these tests, plus an
# in-memory cache so each fixture file is only read from disk once.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the named fixture, parsed as JSON when possible (cached)."""
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as handle:
        contents = handle.read()

    try:
        contents = json.loads(contents)
    except Exception:
        # Not valid JSON -- keep the raw file contents instead.
        pass

    fixture_data[path] = contents
    return contents
class BigIpObj(object):
    """Lightweight stand-in whose attributes mirror the given kwargs."""

    def __init__(self, **kwargs):
        for attr, value in kwargs.items():
            setattr(self, attr, value)
class TestParameters(unittest.TestCase):
    """Unit tests for the module-side and API-side parameter adapters."""

    def test_module_parameters(self):
        raw = dict(
            name='somevlan',
            tag=213,
            description='fakevlan',
            untagged_interfaces=['1.1'],
        )

        params = ModuleParameters(params=raw)
        # Each raw value must be exposed unchanged as an attribute.
        assert params.name == 'somevlan'
        assert params.tag == 213
        assert params.description == 'fakevlan'
        assert params.untagged_interfaces == ['1.1']

    def test_api_parameters(self):
        raw = dict(
            name='somevlan',
            description='fakevlan',
            tag=213
        )

        params = ApiParameters(params=raw)
        assert params.name == 'somevlan'
        assert params.tag == 213
        assert params.description == 'fakevlan'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager's create/update flows with the device-facing
    methods mocked out, asserting on the returned 'changed' results."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_vlan(self, *args):
        """Create with only a description; no interfaces or tag."""
        set_module_args(dict(
            name='somevlan',
            description='fakevlan',
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['description'] == 'fakevlan'

    def test_create_vlan_tagged_interface(self, *args):
        """Create with a single tagged interface and an explicit tag."""
        set_module_args(dict(
            name='somevlan',
            tagged_interface=['2.1'],
            tag=213,
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['tagged_interfaces'] == ['2.1']
        assert results['tag'] == 213

    def test_create_vlan_untagged_interface(self, *args):
        """Create with a single untagged interface."""
        set_module_args(dict(
            name='somevlan',
            untagged_interface=['2.1'],
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['untagged_interfaces'] == ['2.1']

    def test_create_vlan_tagged_interfaces(self, *args):
        """Create with multiple tagged interfaces; result is sorted."""
        set_module_args(dict(
            name='somevlan',
            tagged_interface=['2.1', '1.1'],
            tag=213,
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['tagged_interfaces'] == ['1.1', '2.1']
        assert results['tag'] == 213

    def test_create_vlan_untagged_interfaces(self, *args):
        """Create with multiple untagged interfaces; result is sorted."""
        set_module_args(dict(
            name='somevlan',
            untagged_interface=['2.1', '1.1'],
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['untagged_interfaces'] == ['1.1', '2.1']

    def test_update_vlan_untag_interface(self, *args):
        """Update an existing VLAN (state from fixtures) to untag 2.1."""
        set_module_args(dict(
            name='somevlan',
            untagged_interface=['2.1'],
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)

        current = ApiParameters(params=load_fixture('load_vlan.json'))
        interfaces = load_fixture('load_vlan_interfaces.json')
        current.update({'interfaces': interfaces})

        mm.update_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['untagged_interfaces'] == ['2.1']

    def test_update_vlan_tag_interface(self, *args):
        """Update an existing VLAN to tag interface 2.1."""
        set_module_args(dict(
            name='somevlan',
            tagged_interface=['2.1'],
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)

        current = ApiParameters(params=load_fixture('load_vlan.json'))

        mm.update_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['tagged_interfaces'] == ['2.1']

    def test_update_vlan_description(self, *args):
        """Update only the description of an existing VLAN."""
        set_module_args(dict(
            name='somevlan',
            description='changed_that',
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)

        current = ApiParameters(params=load_fixture('update_vlan_description.json'))

        mm.update_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['description'] == 'changed_that'
| gpl-3.0 |
marratj/ansible | lib/ansible/modules/network/iosxr/iosxr_facts.py | 27 | 12721 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_facts
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Collect facts from remote devices running IOS XR
description:
- Collects a base set of device facts from a remote device that
is running IOS XR. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: iosxr
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- iosxr_facts:
gather_subset: all
# Collect only the config and default facts
- iosxr_facts:
gather_subset:
- config
# Do not collect hardware facts
- iosxr_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: always
type: string
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args, run_commands
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
    """Base class for fact collectors.

    Subclasses implement ``commands()`` (the CLI commands they need) and
    ``populate()`` (which fills ``self.facts`` from the command output).
    """

    def __init__(self):
        self.facts = {}
        self.commands()

    def commands(self):
        raise NotImplementedError
class Default(FactsBase):
    """Collects the always-gathered facts: version, image and hostname."""

    def commands(self):
        return ['show version brief']

    def populate(self, results):
        data = results['show version brief']
        self.facts['version'] = self.parse_version(data)
        self.facts['image'] = self.parse_image(data)
        self.facts['hostname'] = self.parse_hostname(data)

    def parse_version(self, data):
        m = re.search(r'Version (\S+)$', data, re.M)
        return m.group(1) if m else None

    def parse_hostname(self, data):
        m = re.search(r'^(.+) uptime', data, re.M)
        return m.group(1) if m else None

    def parse_image(self, data):
        m = re.search(r'image file is "(.+)"', data)
        return m.group(1) if m else None
class Hardware(FactsBase):
    """Collects file system and memory facts."""

    def commands(self):
        return ['dir /all', 'show memory summary']

    def populate(self, results):
        self.facts['filesystems'] = self.parse_filesystems(results['dir /all'])

        memory = re.search(r'Physical Memory: (\d+)M total \((\d+)',
                           results['show memory summary'])
        if memory:
            self.facts['memtotal_mb'] = memory.group(1)
            self.facts['memfree_mb'] = memory.group(2)

    def parse_filesystems(self, data):
        # 'dir /all' prints one "Directory of <fs>" header per file system.
        return re.findall(r'^Directory of (\S+)', data, re.M)
class Config(FactsBase):
    """Collects the device's running configuration verbatim."""

    COMMAND = 'show running-config'

    def commands(self):
        return [self.COMMAND]

    def populate(self, results):
        self.facts['config'] = results[self.COMMAND]
class Interfaces(FactsBase):
    """Collects per-interface facts, IPv4/IPv6 addresses and LLDP
    neighbor information from raw CLI output."""

    def commands(self):
        # 'show lldp' is only used to check whether LLDP is enabled.
        return(['show interfaces', 'show ipv6 interface',
                'show lldp', 'show lldp neighbors detail'])

    def populate(self, results):
        """Fill self.facts from the raw command output in ``results``."""
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        interfaces = self.parse_interfaces(results['show interfaces'])
        self.facts['interfaces'] = self.populate_interfaces(interfaces)

        data = results['show ipv6 interface']
        if len(data) > 0:
            data = self.parse_interfaces(data)
            self.populate_ipv6_interfaces(data)

        # Only parse neighbors when the device reports LLDP as enabled.
        if 'LLDP is not enabled' not in results['show lldp']:
            neighbors = results['show lldp neighbors detail']
            self.facts['neighbors'] = self.parse_neighbors(neighbors)

    def populate_interfaces(self, interfaces):
        """Turn each parsed interface text block into a fact dict."""
        facts = dict()
        for key, value in iteritems(interfaces):
            intf = dict()
            intf['description'] = self.parse_description(value)
            intf['macaddress'] = self.parse_macaddress(value)

            # NOTE(review): parse_ipv4() is called twice; the second call
            # could simply reuse the ``ipv4`` result.
            ipv4 = self.parse_ipv4(value)
            intf['ipv4'] = self.parse_ipv4(value)
            if ipv4:
                self.add_ip_address(ipv4['address'], 'ipv4')

            intf['mtu'] = self.parse_mtu(value)
            intf['bandwidth'] = self.parse_bandwidth(value)
            intf['duplex'] = self.parse_duplex(value)
            intf['lineprotocol'] = self.parse_lineprotocol(value)
            intf['operstatus'] = self.parse_operstatus(value)
            intf['type'] = self.parse_type(value)

            facts[key] = intf
        return facts

    def populate_ipv6_interfaces(self, data):
        """Attach IPv6 address/subnet pairs to already-known interfaces."""
        for key, value in iteritems(data):
            # Skip keys that are output noise rather than interface names
            # (presumably header lines -- confirm against device output).
            if key in ['No', 'RPF'] or key.startswith('IP'):
                continue
            self.facts['interfaces'][key]['ipv6'] = list()
            addresses = re.findall(r'\s+(.+), subnet', value, re.M)
            subnets = re.findall(r', subnet is (.+)$', value, re.M)
            for addr, subnet in zip(addresses, subnets):
                ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
                self.add_ip_address(addr.strip(), 'ipv6')
                self.facts['interfaces'][key]['ipv6'].append(ipv6)

    def add_ip_address(self, address, family):
        """Record an address in the appropriate all_*_addresses list."""
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)

    def parse_neighbors(self, neighbors):
        """Group LLDP neighbor entries by local interface."""
        facts = dict()
        # Entries in 'show lldp neighbors detail' are separated by a
        # dashed rule; the text before the first rule is discarded.
        nbors = neighbors.split('------------------------------------------------')
        for entry in nbors[1:]:
            if entry == '':
                continue
            intf = self.parse_lldp_intf(entry)
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            fact['host'] = self.parse_lldp_host(entry)
            fact['port'] = self.parse_lldp_port(entry)
            facts[intf].append(fact)
        return facts

    def parse_interfaces(self, data):
        """Split CLI output into {interface_name: text_block}.

        A line starting in column 0 begins a new interface; indented
        lines are continuations of the current one.
        """
        parsed = dict()
        key = ''
        for line in data.split('\n'):
            if len(line) == 0:
                continue
            elif line[0] == ' ':
                parsed[key] += '\n%s' % line
            else:
                match = re.match(r'^(\S+)', line)
                if match:
                    key = match.group(1)
                    parsed[key] = line
        return parsed

    # Each parse_* helper below extracts one field from an interface's
    # text block, returning None when the pattern is absent.

    def parse_description(self, data):
        match = re.search(r'Description: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_macaddress(self, data):
        match = re.search(r'address is (\S+)', data)
        if match:
            return match.group(1)

    def parse_ipv4(self, data):
        # Returns {'address': ..., 'masklen': int} or None.
        match = re.search(r'Internet address is (\S+)/(\d+)', data)
        if match:
            addr = match.group(1)
            masklen = int(match.group(2))
            return dict(address=addr, masklen=masklen)

    def parse_mtu(self, data):
        match = re.search(r'MTU (\d+)', data)
        if match:
            return int(match.group(1))

    def parse_bandwidth(self, data):
        match = re.search(r'BW (\d+)', data)
        if match:
            return int(match.group(1))

    def parse_duplex(self, data):
        match = re.search(r'(\w+) Duplex', data, re.M)
        if match:
            return match.group(1)

    def parse_type(self, data):
        match = re.search(r'Hardware is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lineprotocol(self, data):
        match = re.search(r'line protocol is (.+)\s+?$', data, re.M)
        if match:
            return match.group(1)

    def parse_operstatus(self, data):
        match = re.search(r'^(?:.+) is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_intf(self, data):
        match = re.search(r'^Local Interface: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_host(self, data):
        match = re.search(r'System Name: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_port(self, data):
        match = re.search(r'Port id: (.+)$', data, re.M)
        if match:
            return match.group(1)
# Maps each gather_subset keyword to the class that collects it.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

# The set of subset names accepted by the gather_subset option.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Module entry point: resolve the requested fact subsets, run the
    collectors, and exit with ansible_net_* facts."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    spec.update(iosxr_argument_spec)

    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    # Expand 'all' and '!name' entries into include/exclude sets.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    # The default facts are always collected.
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key]())

    try:
        for inst in instances:
            commands = inst.commands()
            responses = run_commands(module, commands)
            results = dict(zip(commands, responses))
            inst.populate(results)
            facts.update(inst.facts)
    except Exception as exc:
        # BUG FIX: this previously called
        # module.exit_json(out=module.from_json(results)), which reported
        # success on failure, raised NameError when the very first command
        # failed (``results`` unbound), and passed a dict to from_json().
        # Report the failure properly instead.
        module.fail_json(msg=str(exc))

    ansible_facts = dict()
    for key, value in iteritems(facts):
        # Ansible convention: prefix every network fact with ansible_net_.
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Valeureux/wezer-exchange | __unreviewed__/community/res_config.py | 1 | 7508 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron and Valeureux Copyright Valeureux.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
class CommunityModuleConfiguration(osv.osv_memory):
"""
Add a configuration form to install community modules
"""
_name = 'community.module.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_community_blog': fields.boolean('Install Blog module'),
# 'module_community_crowdfunding':
# fields.boolean('Install crowdfunding'),
'module_community_crm': fields.boolean('Install CRM module'),
'module_community_event': fields.boolean('Install Event module'),
'module_community_forum': fields.boolean('Install Forum module'),
'module_community_marketplace': fields.boolean(
'Install Marketplace module'
),
'wallet_chart': fields.selection(
[
('l10n_fr_wallet', 'French chart of account')
], string="Wallet Chart"),
'module_community_project': fields.boolean('Install Project module'),
}
def onchange_marketplace(self, cr, uid, ids, marketplace, context=None):
# Reset wallet chart with get_default value if we
# change the marketplace checkbox
res = {'value': {'wallet_chart': False}}
default = self.get_default_wallet_chart(cr, uid, '', context=None)
if default['wallet_chart'] and marketplace:
res['value']['wallet_chart'] = default['wallet_chart']
return res
def get_default_wallet_chart(self, cr, uid, fields, context=None):
# Get install wallet chart
module_obj = self.pool.get('ir.module.module')
installed_module_ids = module_obj.search(
cr, uid, [('state', 'in', ['installed', 'to upgrade'])],
context=context
)
installed_modules = []
for module in module_obj.browse(
cr, uid, installed_module_ids, context=context
):
installed_modules.append(module.name)
res = {'wallet_chart': False}
if 'l10n_fr_wallet' in installed_modules:
res = {'wallet_chart': 'l10n_fr_wallet'}
return res
def set_wallet_chart(self, cr, uid, ids, context=None):
# Install wallet chart
ir_module = self.pool['ir.module.module']
config = self.browse(cr, uid, ids[0], context)
if config.wallet_chart:
module_ids = ir_module.search(
cr, uid, [('name', '=', config.wallet_chart)], context=context
)
if module_ids:
ir_module.button_immediate_install(
cr, uid, module_ids, context=context
)
    def execute(self, cr, uid, ids, context=None):
        # Install or uninstall specified modules, then install/uninstall
        # the "glue" modules that depend on combinations of them.
        ir_module = self.pool['ir.module.module']
        classified = self._get_classified_fields(cr, uid, context=context)
        config = self.browse(cr, uid, ids[0], context)
        # split the 'module_*' checkboxes into modules to (un)install
        to_install = []
        to_uninstall_ids = []
        lm = len('module_')
        for name, module in classified['module']:
            if config[name]:
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_ids.append(module.id)
        # let res.config.settings perform the actual (un)installations
        res = super(CommunityModuleConfiguration, self).execute(
            cr, SUPERUSER_ID, ids, context=context
        )
        to_install_dependencies = []
        modules = []
        for module in to_install:
            modules.append(module[0])
        # modules installed AFTER super().execute() ran
        installed_module_ids = ir_module.search(
            cr, uid, [('state', 'in', ['installed', 'to upgrade'])],
            context=context
        )
        installed_modules = []
        for module in ir_module.browse(
            cr, uid, installed_module_ids, context=context
        ):
            installed_modules.append(module.name)
        # NOTE: 'and' binds tighter than 'or', so this reads as
        # (marketplace requested and project installed) or
        # (project requested and marketplace installed)
        if 'community_marketplace' in modules and 'community_project' \
                in installed_modules or 'community_project' in modules \
                and 'community_marketplace' in installed_modules:
            to_install_dependencies.append('project_marketplace')
        # if 'community_crowdfunding' in modules and 'community_project'
        # in installed_modules or 'community_project' in modules
        # and 'community_crowdfunding' in installed_modules:
        # to_install_dependencies.append('project_crowdfunding')
        # if 'community_crowdfunding' in modules and 'community_marketplace'
        # in installed_modules or 'community_marketplace' in modules
        # and 'community_crowdfunding' in installed_modules:
        # to_install_dependencies.append('marketplace_crowdfunding')
        # resolve dependency names to (name, browse-record) pairs
        to_install_final = []
        module_ids = ir_module.search(
            cr, uid, [('name', 'in', to_install_dependencies)], context=context
        )
        for module in ir_module.browse(cr, uid, module_ids, context=context):
            to_install_final.append((module.name, module))
        # map each uninstalled community module to its base dependency
        to_uninstall_dependencies = []
        modules = []
        for module in ir_module.browse(
            cr, uid, to_uninstall_ids, context=context
        ):
            modules.append(module.name)
        if 'community_blog' in modules:
            to_uninstall_dependencies.append('website_blog')
        # if 'community_crowdfunding' in modules:
        # to_uninstall_dependencies.append('crowdfunding')
        if 'community_crm' in modules:
            to_uninstall_dependencies.append('crm')
        if 'community_event' in modules:
            to_uninstall_dependencies.append('event')
        if 'community_forum' in modules:
            to_uninstall_dependencies.append('website_forum')
        if 'community_marketplace' in modules:
            to_uninstall_dependencies.append('account_wallet')
        if 'community_project' in modules:
            to_uninstall_dependencies.append('project')
        to_uninstall_final_ids = []
        module_ids = ir_module.search(
            cr, uid, [('name', 'in', to_uninstall_dependencies)],
            context=context
        )
        for module in ir_module.browse(cr, uid, module_ids, context=context):
            to_uninstall_final_ids.append(module.id)
        # only uninstall dependencies when at least one module was unticked
        if to_uninstall_ids:
            ir_module.button_immediate_uninstall(
                cr, SUPERUSER_ID, to_uninstall_final_ids, context=context
            )
        self._install_modules(
            cr, SUPERUSER_ID, to_install_final, context=context
        )
        return res
| agpl-3.0 |
Balachan27/django | tests/template_tests/filter_tests/test_time.py | 326 | 1812 | from datetime import time
from django.template.defaultfilters import time as time_filter
from django.test import SimpleTestCase
from django.utils import timezone
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeTests(TimezoneTestCase):
    """
    #20693: Timezone support for the time template filter
    """
    # Format specifiers exercised below:
    #   e -> timezone name, O -> UTC offset, T -> timezone abbreviation,
    #   Z -> offset in seconds, P -> human-readable time
    @setup({'time01': '{{ dt|time:"e:O:T:Z" }}'})
    def test_time01(self):
        # aware datetime in a +03:15 fixed-offset zone: every timezone
        # specifier resolves (11700 s == 3 h 15 min)
        output = self.engine.render_to_string('time01', {'dt': self.now_tz_i})
        self.assertEqual(output, '+0315:+0315:+0315:11700')
    @setup({'time02': '{{ dt|time:"e:T" }}'})
    def test_time02(self):
        # naive datetime: 'e' renders empty, 'T' falls back to the name of
        # the current timezone
        output = self.engine.render_to_string('time02', {'dt': self.now})
        self.assertEqual(output, ':' + self.now_tz.tzinfo.tzname(self.now_tz))
    @setup({'time03': '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time03(self):
        # time objects: timezone specifiers render empty even when the
        # time carries a tzinfo
        output = self.engine.render_to_string('time03', {'t': time(4, 0, tzinfo=timezone.get_fixed_timezone(30))})
        self.assertEqual(output, '4 a.m.::::')
    @setup({'time04': '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time04(self):
        # naive time: identical output to the aware case above
        output = self.engine.render_to_string('time04', {'t': time(4, 0)})
        self.assertEqual(output, '4 a.m.::::')
    @setup({'time05': '{{ d|time:"P:e:O:T:Z" }}'})
    def test_time05(self):
        # a date is not a time: the filter yields an empty string
        output = self.engine.render_to_string('time05', {'d': self.today})
        self.assertEqual(output, '')
    @setup({'time06': '{{ obj|time:"P:e:O:T:Z" }}'})
    def test_time06(self):
        # non-datetime values are silently rendered as an empty string
        output = self.engine.render_to_string('time06', {'obj': 'non-datetime-value'})
        self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
    def test_inputs(self):
        # 'h' is the zero-padded 12-hour format: 13:00 -> '01' and
        # midnight (hour 0) -> '12'
        self.assertEqual(time_filter(time(13), 'h'), '01')
        self.assertEqual(time_filter(time(0), 'h'), '12')
| bsd-3-clause |
TUW-GEO/rt1 | rt1/surface.py | 1 | 29407 | """Definition of BRDF functions"""
import numpy as np
import sympy as sp
from functools import partial, update_wrapper, lru_cache
from .scatter import Scatter
from .rtplots import polarplot, hemreflect
class Surface(Scatter):
    """
    Base class for surface BRDF definitions.

    Subclasses are expected to provide:

    - ``_func`` : sympy expression of the BRDF in terms of the symbols
      theta_0, theta_ex, phi_0, phi_ex (plus optional parameter-symbols)
    - ``legcoefs`` : sympy expression of the Legendre-expansion
      coefficients in terms of the symbol ``n``
    - ``ncoefs`` : number of coefficients used in the Legendre expansion
    """

    def __init__(self, **kwargs):
        # set scattering angle generalization-matrix to [1,1,1] if it is not
        # explicitly provided by the chosen class.
        # this results in a peak in specular-direction which is suitable
        # for describing surface BRDF's
        self.a = getattr(self, "a", [1.0, 1.0, 1.0])

        # normalization factor used to scale the BRDF
        self.NormBRDF = kwargs.pop("NormBRDF", 1.0)

        # quick way for visualizing the functions as polarplot
        self.polarplot = partial(polarplot, X=self)
        update_wrapper(self.polarplot, polarplot)

        # quick way for visualizing the associated hemispherical reflectance
        self.hemreflect = partial(hemreflect, SRF=self)
        update_wrapper(self.hemreflect, hemreflect)

    @lru_cache()
    def _lambda_func(self, *args):
        """
        Lambdify ``self._func`` with the four angle-symbols followed by the
        given parameter-names as arguments.  Cached, since lambdification
        is expensive.
        """
        # define sympy objects
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")

        # replace arguments and evaluate expression
        # sp.lambdify is used to allow array-inputs
        args = (theta_0, theta_ex, phi_0, phi_ex) + tuple(args)
        pfunc = sp.lambdify(args, self._func, modules=["numpy", "sympy"])
        return pfunc

    @staticmethod
    def _ensure_array_output(func, param_dict):
        """
        Ensure that ``func`` produces array-output.

        In case ``_func`` is a constant, lambdify will produce a function
        with scalar output which is not suitable for further processing
        (this happens e.g. for the Isotropic brdf) -> vectorize it.
        """
        # TODO this is not a proper test !
        if not isinstance(
            func(
                np.array([0.1, 0.2, 0.3]),
                0.1,
                0.1,
                0.1,
                **{key: 0.12 for key in param_dict.keys()}
            ),
            np.ndarray,
        ):
            func = np.vectorize(func)
        return func

    def _parse_geometry(self, t_0, t_ex, p_0, p_ex, geometry):
        """
        Translate the geometry-specifier into angle values.

        Each angle is returned either as a sympy-Symbol (if the associated
        character of ``geometry`` is 'v' for 'variable') or as the unique
        numerical value of the associated input-array (if the character is
        'f' for 'fixed').  geometry = 'mono' ties the exit-angles to the
        incidence-angles (t_ex = t_0, p_ex = p_0 + pi).

        For details see
        (http://rt1.readthedocs.io/en/latest/model_specification.html#evaluation-geometries)

        Parameters
        ----------
        t_0, t_ex, p_0, p_ex : array_like(float)
            incident/exit zenith- and azimuth-angles in radians
        geometry : str
            4 character string of 'f'/'v' flags for (t_0, t_ex, p_0, p_ex),
            or 'mono'

        Returns
        -------
        tuple
            (theta_0, theta_ex, phi_0, phi_ex) as symbols or numbers
        """
        if geometry == "mono":
            assert len(np.unique(p_0)) == 1, (
                "p_0 must contain only a "
                + "single unique value for monostatic geometry"
            )
            theta_0 = sp.Symbol("theta_0")
            return (
                theta_0,
                theta_0,
                np.unique(p_0)[0],
                np.unique(p_0)[0] + sp.pi,
            )

        if geometry[0] == "v":
            theta_0 = sp.Symbol("theta_0")
        elif geometry[0] == "f":
            assert len(np.unique(t_0)) == 1, (
                "t_0 must contain only a "
                + "single unique value for geometry[0] == f"
            )
            theta_0 = np.unique(t_0)[0]
        else:
            raise AssertionError("wrong choice of theta_0 geometry")

        if geometry[1] == "v":
            theta_ex = sp.Symbol("theta_ex")
        elif geometry[1] == "f":
            assert len(np.unique(t_ex)) == 1, (
                "t_ex must contain only"
                + " a single unique value for geometry[1] == f"
            )
            theta_ex = np.unique(t_ex)[0]
        else:
            raise AssertionError("wrong choice of theta_ex geometry")

        if geometry[2] == "v":
            phi_0 = sp.Symbol("phi_0")
        elif geometry[2] == "f":
            assert len(np.unique(p_0)) == 1, (
                "p_0 must contain only"
                + " a single unique value for geometry[2] == f"
            )
            phi_0 = np.unique(p_0)[0]
        else:
            raise AssertionError("wrong choice of phi_0 geometry")

        if geometry[3] == "v":
            phi_ex = sp.Symbol("phi_ex")
        elif geometry[3] == "f":
            # BUGFIX: the original implementation asserted uniqueness of
            # p_0 here although the value actually used is taken from p_ex
            assert len(np.unique(p_ex)) == 1, (
                "p_ex must contain only"
                + " a single unique value for geometry[3] == f"
            )
            phi_ex = np.unique(p_ex)[0]
        else:
            raise AssertionError("wrong choice of phi_ex geometry")

        return theta_0, theta_ex, phi_0, phi_ex

    def brdf(self, t_0, t_ex, p_0, p_ex, param_dict={}):
        """
        Calculate numerical value of the BRDF for chosen
        incidence- and exit angles.

        Parameters
        ----------
        t_0 : array_like(float)
            array of incident zenith-angles in radians
        p_0 : array_like(float)
            array of incident azimuth-angles in radians
        t_ex : array_like(float)
            array of exit zenith-angles in radians
        p_ex : array_like(float)
            array of exit azimuth-angles in radians
        param_dict : dict, optional
            mapping of additional parameter-names to their numeric values

        Returns
        -------
        array_like(float)
            Numerical value of the BRDF
        """
        # if an explicit numeric function is provided, use it, otherwise
        # lambdify the available sympy-function
        if hasattr(self, "_func_numeric"):
            brdffunc = self._func_numeric
        else:
            brdffunc = self._lambda_func(*param_dict.keys())

        brdffunc = self._ensure_array_output(brdffunc, param_dict)
        return brdffunc(t_0, t_ex, p_0, p_ex, **param_dict)

    def legexpansion(self, t_0, t_ex, p_0, p_ex, geometry):
        """
        Definition of the legendre-expansion of the BRDF

        .. note::
            The output represents the legendre-expansion as needed to
            compute the fn-coefficients for the chosen geometry!
            (http://rt1.readthedocs.io/en/latest/theory.html#equation-fn_coef_definition)

            The incidence-angle argument of the legexpansion() is different
            to the documentation due to the direct definition of the argument
            as the zenith-angle (t_0) instead of the incidence-angle
            defined in a spherical coordinate system (t_i).
            They are related via: t_i = pi - t_0

        Parameters
        ----------
        t_0 : array_like(float)
            array of incident zenith-angles in radians
        p_0 : array_like(float)
            array of incident azimuth-angles in radians
        t_ex : array_like(float)
            array of exit zenith-angles in radians
        p_ex : array_like(float)
            array of exit azimuth-angles in radians
        geometry : str
            4 character 'f'/'v' specifier (or 'mono'), see
            :meth:`_parse_geometry` for details

        Returns
        -------
        sympy - expression
            The legendre - expansion of the BRDF for the chosen geometry
        """
        assert self.ncoefs > 0

        theta_s = sp.Symbol("theta_s")
        phi_s = sp.Symbol("phi_s")
        n = sp.Symbol("n")

        # resolve fixed/variable angles; only the exit-angles appear in the
        # expansion (the scattering direction is given by theta_s / phi_s)
        _, theta_ex, _, phi_ex = self._parse_geometry(
            t_0, t_ex, p_0, p_ex, geometry
        )

        return sp.Sum(
            self.legcoefs
            * sp.legendre(
                n, self.scat_angle(theta_s, theta_ex, phi_s, phi_ex, self.a)
            ),
            (n, 0, self.ncoefs - 1),
        )

    def brdf_theta_diff(
        self,
        t_0,
        t_ex,
        p_0,
        p_ex,
        geometry,
        param_dict={},
        return_symbolic=False,
        n=1,
    ):
        """
        Calculation of the derivative of the BRDF with respect to
        the exit zenith-angle t_ex.

        Parameters
        ----------
        t_0 : array_like(float)
            array of incident zenith-angles in radians
        p_0 : array_like(float)
            array of incident azimuth-angles in radians
        t_ex : array_like(float)
            array of exit zenith-angles in radians
        p_ex : array_like(float)
            array of exit azimuth-angles in radians
        geometry : str
            4 character 'f'/'v' specifier (or 'mono'), see
            :meth:`_parse_geometry` for details
        param_dict : dict, optional
            mapping of additional parameter-names to their numeric values
        return_symbolic : bool (default = False)
            indicator if symbolic result should be returned
        n : int (default = 1)
            order of derivatives (d^n / d_theta^n)

        Returns
        -------
        sympy - expression or array_like(float)
            The derivative of the BRDF with respect to the exit angle
            t_ex for the chosen geometry
        """
        if geometry == "mono":
            # monostatic geometry ties the exit-angles to the incident ones
            t_ex = t_0
            p_ex = p_0 + np.pi

        theta_0, theta_ex, phi_0, phi_ex = self._parse_geometry(
            t_0, t_ex, p_0, p_ex, geometry
        )

        if geometry[1] == "f":
            # the exit zenith-angle is fixed -> the derivative vanishes
            dfunc_dtheta_0 = 0.0
        else:
            func = self._func.xreplace(
                {
                    sp.Symbol("theta_0"): theta_0,
                    sp.Symbol("theta_ex"): theta_ex,
                    sp.Symbol("phi_0"): phi_0,
                    sp.Symbol("phi_ex"): phi_ex,
                }
            )
            dfunc_dtheta_0 = sp.diff(func, theta_ex, n)

        if return_symbolic is True:
            return dfunc_dtheta_0

        args = (
            sp.Symbol("theta_0"),
            sp.Symbol("theta_ex"),
            sp.Symbol("phi_0"),
            sp.Symbol("phi_ex"),
        ) + tuple(param_dict.keys())

        brdffunc = sp.lambdify(args, dfunc_dtheta_0, modules=["numpy", "sympy"])
        brdffunc = self._ensure_array_output(brdffunc, param_dict)
        return brdffunc(t_0, t_ex, p_0, p_ex, **param_dict)
class LinCombSRF(Surface):
    """
    Class to generate linear-combinations of surface-class elements

    For details please look at the documentation
    (http://rt1.readthedocs.io/en/latest/model_specification.html#linear-combination-of-scattering-distributions)

    Parameters
    ----------
    SRFchoices : [ [float, Surface] , [float, Surface] , ...]
        A list that contains the individual BRDF's
        (Surface-objects) and the associated weighting-factors
        (floats) for the linear-combination.
    NormBRDF : scalar(float)
        Hemispherical reflectance of the combined BRDF

        ATTENTION: NormBRDF-values provided within the SRFchoices-list
        will not be considered!
    """
    def __init__(self, SRFchoices=None, **kwargs):
        super(LinCombSRF, self).__init__(**kwargs)

        self.SRFchoices = SRFchoices
        self._set_legexpansion()

    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        return self._SRFcombiner()._func

    def _set_legexpansion(self):
        """set legexpansion to the combined legexpansion"""
        self.ncoefs = self._SRFcombiner().ncoefs
        self.legexpansion = self._SRFcombiner().legexpansion

    def _SRFcombiner(self):
        """
        Returns a Surface-class element based on an input-array of
        Surface-class elements.
        The array must be shaped in the form:
            SRFchoices = [ [ weighting-factor , Surface-class element ],
                           [ weighting-factor , Surface-class element ],
                           ...]

        ATTENTION: the .legexpansion()-function of the combined surface-class
        element is no longer related to its legcoefs (which are set to 0.)
        since the individual legexpansions of the combined surface-
        class elements are possibly evaluated with a different
        a-parameter of the generalized scattering angle! This does
        not affect any calculations, since the evaluation is
        only based on the use of the .legexpansion()-function.
        """
        class BRDFfunction(Surface):
            """
            dummy-Surface-class object used to generate
            linear-combinations of BRDF-functions
            """
            def __init__(self, **kwargs):
                super(BRDFfunction, self).__init__(**kwargs)
                self._func = 0.0
                self.legcoefs = 0.0

        # initialize a combined phase-function class element
        # BUGFIX: the keyword was misspelled 'NormBRDf', which caused the
        # normalization to be silently dropped (NormBRDF defaulted to 1.0)
        SRFcomb = BRDFfunction(NormBRDF=self.NormBRDF)

        # set ncoefs of the combined element to the maximum number of
        # coefficients within the chosen functions.
        # (this is necessary for correct evaluation of fn-coefficients)
        SRFcomb.ncoefs = max([SRF[1].ncoefs for SRF in self.SRFchoices])

        # find BRDF functions with equal a parameters
        equals = [
            np.where(
                (np.array([VV[1].a for VV in self.SRFchoices]) == tuple(V[1].a)).all(
                    axis=1
                )
            )[0]
            for V in self.SRFchoices
        ]

        # evaluate index of BRDF-functions that have equal a parameter
        # find phase functions where a-parameter is equal
        equal_a = list({tuple(row) for row in equals})

        # evaluation of combined expansion in legendre-polynomials
        dummylegexpansion = []
        for i in range(0, len(equal_a)):
            SRFdummy = BRDFfunction()
            # select SRF choices where a parameter is equal
            SRFequal = np.take(self.SRFchoices, equal_a[i], axis=0)
            # set ncoefs to the maximum number within the choices
            # with equal a-parameter
            SRFdummy.ncoefs = max([SRF[1].ncoefs for SRF in SRFequal])
            # loop over phase-functions with equal a-parameter
            for SRF in SRFequal:
                # set parameters based on chosen phase-functions and evaluate
                # combined legendre-expansion
                SRFdummy.a = SRF[1].a
                SRFdummy.NormBRDF = SRF[1].NormBRDF
                SRFdummy._func = SRFdummy._func + SRF[1]._func * SRF[0]
                SRFdummy.legcoefs += SRF[1].legcoefs * SRF[0]

            dummylegexpansion = dummylegexpansion + [SRFdummy.legexpansion]

        # combine legendre-expansions for each a-parameter based on given
        # combined legendre-coefficients
        SRFcomb.legexpansion = lambda t_0, t_ex, p_0, p_ex, geometry: np.sum(
            [lexp(t_0, t_ex, p_0, p_ex, geometry) for lexp in dummylegexpansion]
        )

        for SRF in self.SRFchoices:
            # set parameters based on chosen classes to define analytic
            # function representation
            SRFcomb._func = SRFcomb._func + SRF[1]._func * SRF[0]
        return SRFcomb
class Isotropic(Surface):
    """
    Isotropic (Lambertian) surface BRDF.

    Parameters
    ----------
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0, p_0, t_ex, p_ex)
    """
    def __init__(self, **kwargs):
        super(Isotropic, self).__init__(**kwargs)

    @property
    def ncoefs(self):
        """Number of Legendre-expansion coefficients (read-only)."""
        # a single coefficient fully represents the isotropic function,
        # therefore ncoefs is fixed and must not be changed
        return 1

    @property
    @lru_cache()
    def _func(self):
        """Symbolic phase-function: a constant value of 1 / pi."""
        return 1.0 / sp.pi

    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-coefficients: only the n == 0 term is nonzero."""
        n = sp.Symbol("n")
        return (1.0 / sp.pi) * sp.KroneckerDelta(0, n)
class CosineLobe(Surface):
    """
    Define a (possibly generalized) cosine-lobe of power i.

    Parameters
    ----------
    i : scalar(int)
        Power of the cosine lobe, i.e. cos(x)^i
    ncoefs : scalar(int)
        Number of coefficients used within the Legendre-approximation
    a : [ float , float , float ] , optional (default = [1.,1.,1.])
        generalized scattering angle parameters used for defining the
        scat_angle() of the BRDF
        (http://rt1.readthedocs.io/en/latest/theory.html#equation-general_scat_angle)
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0,p_0,t_ex,p_ex)
    """
    def __init__(self, ncoefs=None, i=None, a=[1.0, 1.0, 1.0], **kwargs):
        assert ncoefs is not None, (
            "Error: number of coefficients " + "needs to be provided!"
        )
        assert i is not None, "Error: Cosine lobe power needs to be specified!"
        super(CosineLobe, self).__init__(**kwargs)
        assert ncoefs > 0
        self.i = i
        assert isinstance(self.i, int), (
            "Error: Cosine lobe power needs " + "to be an integer!"
        )
        # i == 0 is explicitly allowed (it yields a constant BRDF);
        # BUGFIX: the message now matches the check (>= 0, not > 0)
        assert i >= 0, (
            "ERROR: Power of Cosine-Lobe needs to be greater than or equal to 0"
        )
        self.a = a
        assert isinstance(self.a, list), (
            "Error: Generalization-parameter " + "needs to be a list"
        )
        assert len(a) == 3, (
            "Error: Generalization-parameter list must " + "contain 3 values"
        )
        # use isinstance() instead of an exact type-comparison so that
        # float subclasses (e.g. numpy.float64) are accepted as well
        assert all(isinstance(x, float) for x in a), (
            "Error: Generalization-"
            + "parameter array must "
            + "contain only floating-"
            + "point values!"
        )
        self.ncoefs = int(ncoefs)

    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-expansion coefficients of the cosine-lobe (eq. A13)."""
        n = sp.Symbol("n")
        # A13 The Rational(is needed as otherwise a Gamma function
        # Pole error is issued)
        return (
            1.0
            / sp.pi
            * (
                (
                    2 ** (-2 - self.i)
                    * (1 + 2 * n)
                    * sp.sqrt(sp.pi)
                    * sp.gamma(1 + self.i)
                )
                / (
                    sp.gamma((2 - n + self.i) * sp.Rational(1, 2))
                    * sp.gamma((3 + n + self.i) * sp.Rational(1, 2))
                )
            )
        )

    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")

        # eq. A13:  f = Max(scat_angle, 0)**i / pi, written without
        # sp.Max() since sp.lambdify('x', sp.Max(x), "numpy") generates a
        # function that can not interpret array inputs.
        x = self.scat_angle(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        return 1.0 / sp.pi * (x * (1.0 + sp.sign(x)) / 2.0) ** self.i
class HenyeyGreenstein(Surface):
    """
    Henyey-Greenstein scattering function used as a BRDF
    approximation function.

    Parameters
    ----------
    t : scalar(float)
        Asymmetry parameter of the Henyey-Greenstein function
    ncoefs : scalar(int)
        Number of coefficients used within the Legendre-approximation
    a : [ float , float , float ] , optional (default = [1., 1., 1.])
        generalized scattering angle parameters used for defining the
        scat_angle() of the BRDF
        (http://rt1.readthedocs.io/en/latest/theory.html#equation-general_scat_angle)
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0, p_0, t_ex, p_ex)
    """
    def __init__(self, t=None, ncoefs=None, a=[1.0, 1.0, 1.0], **kwargs):
        assert t is not None, "t parameter needs to be provided!"
        assert ncoefs is not None, "Number of coeff. needs to be specified"
        super(HenyeyGreenstein, self).__init__(**kwargs)

        self.t = t
        self.ncoefs = ncoefs
        assert self.ncoefs > 0

        self.a = a
        assert isinstance(
            self.a, list
        ), "Error: Generalization-parameter needs to be a list"
        assert (
            len(a) == 3
        ), "Error: Generalization-parameter list must contain 3 values"

    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-expansion coefficients of the HG-function."""
        n = sp.Symbol("n")
        return 1.0 * (1.0 / (sp.pi)) * (2.0 * n + 1) * self.t ** n

    @property
    @lru_cache()
    def _func(self):
        """Symbolic (sympy) representation of the HG phase-function."""
        inc_zen, exc_zen = sp.Symbol("theta_0"), sp.Symbol("theta_ex")
        inc_az, exc_az = sp.Symbol("phi_0"), sp.Symbol("phi_ex")
        mu = self.scat_angle(inc_zen, exc_zen, inc_az, exc_az, a=self.a)
        return (
            1.0
            * (1.0 - self.t ** 2.0)
            / ((sp.pi) * (1.0 + self.t ** 2.0 - 2.0 * self.t * mu) ** 1.5)
        )
class HG_nadirnorm(Surface):
    """
    Define a HenyeyGreenstein scattering function for use as BRDF
    approximation function (normalized such that the hemispherical
    reflectance at nadir-incidence equals 1).

    Parameters
    ----------
    t : scalar(float)
        Asymmetry parameter of the Henyey-Greenstein function
    ncoefs : scalar(int)
        Number of coefficients used within the Legendre-approximation
    a : [ float , float , float ] , optional (default = [1.,1.,1.])
        generalized scattering angle parameters used for defining the
        scat_angle() of the BRDF
        (http://rt1.readthedocs.io/en/latest/theory.html#equation-general_scat_angle)
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0,p_0,t_ex,p_ex)
    """
    def __init__(self, t=None, ncoefs=None, a=[1.0, 1.0, 1.0], **kwargs):
        assert t is not None, "t parameter needs to be provided!"
        assert ncoefs is not None, "Number of coeffs needs to be specified"
        super(HG_nadirnorm, self).__init__(**kwargs)
        # NOTE: t may also be a sympy.Symbol (see _func_numeric below,
        # which resolves symbolic t from the supplied parameter-dict)
        self.t = t
        self.ncoefs = ncoefs
        assert self.ncoefs > 0
        self.a = a
        assert isinstance(self.a, list), (
            "Error: Generalization-parameter " + "needs to be a list"
        )
        assert len(a) == 3, (
            "Error: Generalization-parameter list must " + "contain 3 values"
        )
    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")
        x = self.scat_angle(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        # hemispherical reflectance of the plain HG-function at nadir
        # incidence; used as normalization factor below
        nadir_hemreflect = 4 * (
            (1.0 - self.t ** 2.0)
            * (
                1.0
                - self.t * (-self.t + self.a[0])
                - sp.sqrt(
                    (1 + self.t ** 2 - 2 * self.a[0] * self.t) * (1 + self.t ** 2)
                )
            )
            / (
                2.0
                * self.a[0] ** 2.0
                * self.t ** 2.0
                * sp.sqrt(1.0 + self.t ** 2.0 - 2.0 * self.a[0] * self.t)
            )
        )
        # HG-function divided by its nadir hemispherical reflectance
        func = (1.0 / nadir_hemreflect) * (
            (1.0 - self.t ** 2.0)
            / ((sp.pi) * (1.0 + self.t ** 2.0 - 2.0 * self.t * x) ** 1.5)
        )
        return func
    def _func_numeric(self, theta_0, theta_ex, phi_0, phi_ex, **kwargs):
        """direct numeric version of _func

        NOTE: this must mirror the symbolic expression in _func exactly
        (numpy instead of sympy); a symbolic asymmetry-parameter t is
        resolved from the supplied keyword-arguments.
        """
        if isinstance(self.t, sp.Symbol):
            t = kwargs[str(self.t)]
        else:
            t = self.t
        x = self._scat_angle_numeric(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        # numeric counterpart of nadir_hemreflect in _func
        nadir_hemreflect = 4 * (
            (1.0 - t ** 2.0)
            * (
                1.0
                - t * (-t + self.a[0])
                - np.sqrt(
                    (1 + t ** 2 - 2 * self.a[0] * t) * (1 + t ** 2)
                )
            )
            / (
                2.0
                * self.a[0] ** 2.0
                * t ** 2.0
                * np.sqrt(1.0 + t ** 2.0 - 2.0 * self.a[0] * t)
            )
        )
        func = (1.0 / nadir_hemreflect) * (
            (1.0 - t ** 2.0)
            / ((np.pi) * (1.0 + t ** 2.0 - 2.0 * t * x) ** 1.5)
        )
        return func
    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-expansion coefficients: plain HG-coefficients scaled
        by the same nadir hemispherical-reflectance normalization."""
        nadir_hemreflect = 4 * (
            (1.0 - self.t ** 2.0)
            * (
                1.0
                - self.t * (-self.t + self.a[0])
                - sp.sqrt(
                    (1 + self.t ** 2 - 2 * self.a[0] * self.t) * (1 + self.t ** 2)
                )
            )
            / (
                2.0
                * self.a[0] ** 2.0
                * self.t ** 2.0
                * sp.sqrt(1.0 + self.t ** 2.0 - 2.0 * self.a[0] * self.t)
            )
        )
        n = sp.Symbol("n")
        legcoefs = (1.0 / nadir_hemreflect) * (
            (1.0 / (sp.pi)) * (2.0 * n + 1) * self.t ** n
        )
        return legcoefs
| apache-2.0 |
sandeepkbhat/pylearn2 | pylearn2/optimization/linear_cg.py | 49 | 2914 | """
.. todo::
WRITEME
"""
import theano
from theano import tensor
from theano.ifelse import ifelse
def linear_cg(fn, params, tol=1e-3, max_iters=1000, floatX=None):
    """
    Minimizes a POSITIVE DEFINITE quadratic function via linear conjugate
    gradient using the R operator to avoid explicitly representing the Hessian.

    If you have several variables, this is cheaper than Newton's method, which
    would need to invert the Hessian. It is also cheaper than standard linear
    conjugate gradient, which works with an explicit representation of the
    Hessian. It is also cheaper than nonlinear conjugate gradient which does a
    line search by repeatedly evaluating f.

    For more information about linear conjugate gradient, you may look at
    http://en.wikipedia.org/wiki/Conjugate_gradient_method .
    (This reference describes linear CG but not converting it to use
    the R operator instead of an explicit representation of the Hessian)

    Parameters
    ----------
    fn : theano_like
        A theano expression which is quadratic with POSITIVE DEFINITE hessian
        in ``params``
    params : theano shared variable, or list of them
        Variable(s) that influence ``fn``
    tol : float
        Minimization halts when the SQUARED norm of the residual
        (i.e. of the gradient) is smaller than tol
    max_iters : int
        Maximum number of CG iterations performed by the scan loop
    floatX : unused
        Kept only for backwards-compatibility of the signature

    Returns
    -------
    rval : theano_like
        The solution in form of a symbolic expression (or list of
        symbolic expressions if ``params`` was given as a list)
    """
    provided_as_list = True
    if not isinstance(params, (list,tuple)):
        params = [params]
        provided_as_list = False
    n_params = len(params)
    def loop(rsold, *args):
        # scan carries the state flattened as [rsold] + ps + rs + xs
        ps = args[:n_params]
        rs = args[n_params:2*n_params]
        xs = args[2*n_params:]
        # Hessian-vector products A*p computed via the R-operator applied
        # to the gradient (no explicit Hessian is ever formed)
        Aps = []
        for param in params:
            rval = tensor.Rop(tensor.grad(fn,param), params, ps)
            if isinstance(rval, (list, tuple)):
                Aps.append(rval[0])
            else:
                Aps.append(rval)
        # standard CG update: step size, iterate, residual, new direction
        alpha = rsold/ sum( (x*y).sum() for x,y in zip(Aps, ps) )
        xs = [ x - alpha*p for x,p in zip(xs,ps)]
        rs = [ r - alpha*Ap for r,Ap in zip(rs,Aps)]
        rsnew = sum( (r*r).sum() for r in rs)
        ps = [ r + rsnew/rsold*p for r,p in zip(rs,ps)]
        # stop early once the squared residual norm drops below tol
        return [rsnew]+ps+rs+xs, theano.scan_module.until(rsnew < tol)
    # initial residual/direction: the gradient at the starting point
    r0s = tensor.grad(fn, params)
    if not isinstance(r0s, (list,tuple)):
        r0s = [r0s]
    p0s = [x for x in r0s]
    x0s = params
    rsold = sum( (r*r).sum() for r in r0s)
    outs, updates = theano.scan( loop,
                       outputs_info = [rsold] + p0s+r0s+x0s,
                       n_steps = max_iters,
                       name = 'linear_conjugate_gradient')
    # extract the xs trajectory; take the last iterate unless the initial
    # point was already within tolerance
    fxs = outs[1+2*n_params:]
    fxs = [ifelse(rsold <tol, x0, x[-1]) for x0,x in zip(x0s, fxs)]
    if not provided_as_list:
        return fxs[0]
    else:
        return fxs
| bsd-3-clause |
invisiblek/android_external_skia | tools/skpdiff/generate_pmetric_tables.py | 179 | 4156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from math import *
COPYRIGHT = '''/*
* Copyright 2013 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/'''
HELP = '// To regenerate SkPMetricUtil_generated.h, simply run ./generate_pmetric_tables.py'
# From Barten SPIE 1989
def contrast_sensitivity(cycles_per_degree, luminance):
    """Barten (SPIE 1989) model of the human contrast-sensitivity
    function at the given spatial frequency and luminance."""
    peak_gain = 440.0 * pow(1.0 + 0.7 / luminance, -0.2)
    decay = 0.3 * pow(1 + 100.0 / luminance, 0.15)
    attenuation = exp(-decay * cycles_per_degree)
    high_freq_boost = sqrt(1.0 + 0.06 * exp(decay * cycles_per_degree))
    return peak_gain * cycles_per_degree * attenuation * high_freq_boost
# From Ward Larson Siggraph 1997
def threshold_vs_intensity(adaptation_luminance):
    """Ward-Larson (Siggraph 1997) threshold-vs-intensity (TVI) curve.

    Non-positive luminances are treated as log-luminance of -infinity.
    """
    try:
        log_lum = log10(adaptation_luminance)
    except ValueError:
        # log10 of a non-positive value
        log_lum = float('-inf')
    # piecewise fit over four log-luminance ranges (highest first)
    if log_lum >= 1.9:
        exponent = log_lum - 1.255
    elif log_lum >= -0.0184:
        exponent = pow(0.249 * log_lum + 0.65, 2.7) - 0.72
    elif log_lum >= -1.44:
        exponent = log_lum - 0.395
    elif log_lum >= -3.94:
        exponent = pow(0.405 * log_lum + 1.6, 2.18) - 2.86
    else:
        exponent = -2.86
    return pow(10.0, exponent)
# From Daly 1993
def visual_mask(contrast):
    """Daly (1993) visual-masking function of the given contrast."""
    scaled = pow(392.498 * contrast, 0.7)
    scaled = pow(0.0153 * scaled, 4.0)
    return pow(1.0 + scaled, 0.25)
# float gCubeRootTable[]
CUBE_ROOT_ACCESS_FUNCTION = '''
static float get_cube_root(float value) {
    SkASSERT(value >= 0.0f);
    SkASSERT(value * 1023.0f < 1024.0f);
    return gCubeRootTable[(int)(value * 1023.0f)];
}
'''


def generate_cube_root_table(stream):
    """Emit gCubeRootTable (1024 samples of x^(1/3) over [0, 1)) plus its
    accessor function into *stream*."""
    stream.write('static float gCubeRootTable[] = {')
    for idx in range(1024):
        # start a new indented line every 6 entries
        if idx % 6 == 0:
            stream.write('\n    ')
        stream.write('%.10ff,' % pow(idx / 1024.0, 1.0 / 3.0))
    stream.write('\n};')
    stream.write(CUBE_ROOT_ACCESS_FUNCTION + '\n')
# float gGammaTable[]
GAMMA_ACCESS_FUNCTION = '''
static float get_gamma(unsigned char value) {
    return gGammaTable[value];
}
'''


def generate_gamma_table(stream):
    """Emit gGammaTable (256 samples of the gamma-2.2 curve) plus its
    accessor function into *stream*."""
    stream.write('static float gGammaTable[] = {')
    for idx in range(256):
        # start a new indented line every 6 entries
        if idx % 6 == 0:
            stream.write('\n    ')
        stream.write('%.10ff,' % pow(idx / 255.0, 2.2))
    stream.write('\n};')
    stream.write(GAMMA_ACCESS_FUNCTION + '\n')
# float gTVITable[]
TVI_ACCESS_FUNCTION = '''
static float get_threshold_vs_intensity(float value) {
    SkASSERT(value >= 0.0f);
    SkASSERT(value < 100.0f);
    return gTVITable[(int)(value * 100.0f)];
}
'''


def generate_tvi_table(stream):
    """Emit gTVITable (threshold-vs-intensity sampled in 0.01-luminance
    steps over [0, 100)) plus its accessor function into *stream*."""
    stream.write('static float gTVITable[] = {')
    for idx in range(10000):
        # start a new indented line every 6 entries
        if idx % 6 == 0:
            stream.write('\n    ')
        stream.write('%.10ff,' % threshold_vs_intensity(idx / 100.0))
    stream.write('\n};')
    stream.write(TVI_ACCESS_FUNCTION + '\n')
# float gVisualMaskTable[]
VISUAL_MASK_DOMAIN = 4000
VISUAL_MASK_ACCESS_FUNCTION = '''
static float get_visual_mask(float value) {{
    SkASSERT(value >= 0.0f);
    SkASSERT(value < {}.0f);
    return gVisualMaskTable[(int)value];
}}'''


def generate_visual_mask_table(stream):
    """Emit gVisualMaskTable (visual masking sampled at integer contrasts
    in [0, VISUAL_MASK_DOMAIN)) plus its accessor function into *stream*."""
    stream.write('static float gVisualMaskTable[] = {')
    for idx in range(VISUAL_MASK_DOMAIN):
        # start a new indented line every 6 entries
        if idx % 6 == 0:
            stream.write('\n    ')
        stream.write('%.10ff,' % visual_mask(idx))
    stream.write('\n};')
    stream.write(VISUAL_MASK_ACCESS_FUNCTION.format(VISUAL_MASK_DOMAIN) + '\n')
def generate_lookup_tables(stream):
    # Emit the complete generated header: banner text followed by every
    # lookup table, all wrapped in the SkPMetricUtil namespace.
    # NOTE(review): COPYRIGHT and HELP are module-level strings defined
    # earlier in this file (not visible here) -- presumably the license
    # text and a "generated file, do not edit" notice; confirm.
    print(COPYRIGHT, file=stream)
    print(HELP, file=stream)
    print('namespace SkPMetricUtil {', file=stream)
    generate_cube_root_table(stream)
    generate_gamma_table(stream)
    generate_tvi_table(stream)
    generate_visual_mask_table(stream)
    print('}', file=stream)
def main():
    """Generate SkPMetricUtil_generated.h in the current directory."""
    # Open in text mode and use a context manager.  The original opened
    # the file with 'wb', but generate_lookup_tables() writes str objects
    # via print(), which raises TypeError on a binary-mode file under
    # Python 3 (the print-function syntax used throughout this script
    # implies Python 3); 'wb' only worked under Python 2.
    with open('SkPMetricUtil_generated.h', 'w') as pmetric_util_out:
        generate_lookup_tables(pmetric_util_out)


if __name__ == '__main__':
    main()
| bsd-3-clause |
ntt-sic/nova | nova/tests/api/openstack/compute/plugins/v3/test_quota_sets.py | 8 | 18489 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.plugins.v3 import quota_sets as quotas
from nova.api.openstack import wsgi
from nova import context as context_maker
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
class QuotaSetsTest(test.TestCase):
    """Tests for the v3 os-quota-sets controller: show/detail/update/delete
    of both project-level and per-user quotas."""

    def setUp(self):
        super(QuotaSetsTest, self).setUp()
        self.controller = quotas.QuotaSetsController()

    def _generate_quota_set(self, **kwargs):
        """Return the default quota-set body; kwargs override/add entries."""
        quota_set = {
            'quota_set': {'metadata_items': 128,
                          'ram': 51200,
                          'floating_ips': 10,
                          'fixed_ips': -1,
                          'instances': 10,
                          'injected_files': 5,
                          'cores': 20,
                          'injected_file_content_bytes': 10240,
                          'security_groups': 10,
                          'security_group_rules': 20,
                          'key_pairs': 100,
                          'injected_file_path_bytes': 255}
        }
        quota_set['quota_set'].update(kwargs)
        return quota_set

    def _generate_detail_quota_set(self, **kwargs):
        """Return the default quota-set body in 'detail' form, where every
        resource maps to a limit/in_use/reserved triple."""
        quota_set = {
            'quota_set':
                {'cores': {'in_use': 0, 'limit': 20, 'reserved': 0},
                 'fixed_ips': {'in_use': 0, 'limit': -1, 'reserved': 0},
                 'floating_ips': {'in_use': 0, 'limit': 10, 'reserved': 0},
                 'injected_files': {'in_use': 0, 'limit': 5, 'reserved': 0},
                 'instances': {'in_use': 0, 'limit': 10, 'reserved': 0},
                 'key_pairs': {'in_use': 0, 'limit': 100, 'reserved': 0},
                 'metadata_items': {'in_use': 0, 'limit': 128, 'reserved': 0},
                 'ram': {'in_use': 0, 'limit': 51200, 'reserved': 0},
                 'security_groups': {'in_use': 0, 'limit': 10, 'reserved': 0},
                 'injected_file_content_bytes':
                     {'in_use': 0, 'limit': 10240, 'reserved': 0},
                 'injected_file_path_bytes':
                     {'in_use': 0, 'limit': 255, 'reserved': 0},
                 'security_group_rules':
                     {'in_use': 0, 'limit': 20, 'reserved': 0}}
        }
        quota_set['quota_set'].update(kwargs)
        return quota_set

    def test_format_quota_set(self):
        raw_quota_set = self._generate_quota_set()['quota_set']
        quota_set = self.controller._format_quota_set('1234', raw_quota_set)
        qs = quota_set['quota_set']
        self.assertEqual(qs['id'], '1234')
        self.assertEqual(qs['instances'], 10)
        self.assertEqual(qs['cores'], 20)
        self.assertEqual(qs['ram'], 51200)
        self.assertEqual(qs['floating_ips'], 10)
        self.assertEqual(qs['fixed_ips'], -1)
        self.assertEqual(qs['metadata_items'], 128)
        self.assertEqual(qs['injected_files'], 5)
        self.assertEqual(qs['injected_file_path_bytes'], 255)
        self.assertEqual(qs['injected_file_content_bytes'], 10240)
        self.assertEqual(qs['security_groups'], 10)
        self.assertEqual(qs['security_group_rules'], 20)
        self.assertEqual(qs['key_pairs'], 100)

    def test_quotas_defaults(self):
        uri = '/os-quota-sets/defaults'
        req = fakes.HTTPRequestV3.blank(uri)
        res_dict = self.controller.defaults(req, 'fake_tenant')
        expected = self._generate_quota_set(id='fake_tenant')
        self.assertEqual(res_dict, expected)

    def test_quotas_show_as_admin(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234',
                                        use_admin_context=True)
        res_dict = self.controller.show(req, '1234')
        self.assertEqual(res_dict, self._generate_quota_set(id='1234'))

    def test_quotas_show_as_unauthorized_user(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, '1234')

    def test_quotas_detail_as_admin(self):
        uri = '/os-quota-sets/1234/detail'
        req = fakes.HTTPRequestV3.blank(uri, use_admin_context=True)
        res_dict = self.controller.detail(req, '1234')
        self.assertEqual(res_dict, self._generate_detail_quota_set(id='1234'))

    def test_quotas_detail_as_unauthorized_user(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234/detail')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.detail,
                          req, 1234)

    def test_quotas_update_as_admin(self):
        id = 'update_me'
        body = self._generate_quota_set()
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/' + id,
                                        use_admin_context=True)
        res_dict = self.controller.update(req, id, body)
        body['quota_set'].update(id=id)
        self.assertEqual(res_dict, body)

    def test_quotas_update_zero_value_as_admin(self):
        id = 'update_me'
        # NOTE: the original literal listed 'fixed_ips' twice (0, then -1);
        # only the later -1 ever took effect, so the dead 0 entry has been
        # dropped.  The resulting dict is unchanged.
        body = {'quota_set': {'instances': 0, 'cores': 0,
                              'ram': 0, 'floating_ips': 0,
                              'metadata_items': 0,
                              'injected_files': 0,
                              'injected_file_content_bytes': 0,
                              'injected_file_path_bytes': 0,
                              'security_groups': 0,
                              'security_group_rules': 0,
                              'key_pairs': 100, 'fixed_ips': -1}}
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/' + id,
                                        use_admin_context=True)
        res_dict = self.controller.update(req, id, body)
        body['quota_set'].update(id=id)
        self.assertEqual(res_dict, body)

    def test_quotas_update_as_user(self):
        body = self._generate_quota_set()
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/update_me')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_key(self):
        body = self._generate_quota_set()
        body['quota_set'].pop('instances')
        body['quota_set']['instances2'] = 10
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/update_me',
                                        use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_limit(self):
        body = self._generate_quota_set(instances=-2)
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/update_me',
                                        use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_value_json_fromat_empty_string(self):
        id = 'update_me'
        expected_resp = self._generate_quota_set(id=id)
        # when PUT JSON format with empty string for quota
        body = self._generate_quota_set(ram='')
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/' + id,
                                        use_admin_context=True)
        res_dict = self.controller.update(req, id, body)
        self.assertEqual(res_dict, expected_resp)

    def test_quotas_update_invalid_value_xml_fromat_empty_string(self):
        id = 'update_me'
        expected_resp = self._generate_quota_set(id=id)
        # when PUT XML format with empty string for quota
        body = self._generate_quota_set(ram={})
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/' + id,
                                        use_admin_context=True)
        res_dict = self.controller.update(req, id, body)
        self.assertEqual(res_dict, expected_resp)

    def test_quotas_update_invalid_value_non_int(self):
        # when PUT non integer value
        # NOTE(review): ``test`` is the imported nova ``test`` module --
        # apparently used as an arbitrary non-integer object; confirm it
        # was not meant to be the string 'test'.
        body = self._generate_quota_set(ram={}, instances=test)
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/update_me',
                                        use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_delete_as_unauthorized_user(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, '1234')

    def test_quotas_delete_as_admin(self):
        context = context_maker.get_admin_context()
        self.req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234')
        self.req.environ['nova.context'] = context
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project")
        quota.QUOTAS.destroy_all_by_project(context, '1234')
        self.mox.ReplayAll()
        self.controller.delete(self.req, '1234')
        self.mox.VerifyAll()

    def test_quotas_update_exceed_in_used(self):
        body = {'quota_set': {'cores': 10}}
        self.stubs.Set(quotas.QuotaSetsController, '_get_quotas',
                       fake_get_quotas)
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/update_me',
                                        use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_force_update_exceed_in_used(self):
        id = 'update_me'
        self.stubs.Set(quotas.QuotaSetsController, '_get_quotas',
                       fake_get_quotas)
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/' + id,
                                        use_admin_context=True)
        expected = {'quota_set': {'ram': 25600, 'instances': 200, 'cores': 10,
                                  'id': id}}
        body = {'quota_set': {'ram': 25600,
                              'instances': 200,
                              'cores': 10,
                              'force': 'True'}}
        # Mutate the shared fake_quotas fixture so the requested values are
        # below the (now raised) limits; 'force' bypasses the in-use check.
        fake_quotas.get('ram')['limit'] = 25600
        fake_quotas.get('cores')['limit'] = 10
        fake_quotas.get('instances')['limit'] = 200
        res_dict = self.controller.update(req, id, body)
        self.assertEqual(res_dict, expected)

    def test_user_quotas_show_as_admin(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234?user_id=1',
                                        use_admin_context=True)
        res_dict = self.controller.show(req, '1234')
        self.assertEqual(res_dict, self._generate_quota_set(id='1234'))

    def test_user_quotas_show_as_unauthorized_user(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, '1234')

    def test_user_quotas_detail_as_admin(self):
        req = fakes.HTTPRequestV3.blank(
            '/os-quota-sets/1234/detail?user_id=1',
            use_admin_context=True
        )
        res_dict = self.controller.detail(req, '1234')
        self.assertEqual(res_dict, self._generate_detail_quota_set(id='1234'))

    def test_user_quotas_detail_as_unauthorized_user(self):
        req = fakes.HTTPRequestV3.blank(
            '/os-quota-sets/1234/detail?user_id=1'
        )
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.detail,
                          req, '1234')

    def test_user_quotas_update_as_admin(self):
        body = self._generate_quota_set()
        url = '/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequestV3.blank(url, use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body)
        body['quota_set'].update({'id': 'update_me'})
        self.assertEqual(res_dict, body)

    def test_user_quotas_update_as_user(self):
        body = self._generate_quota_set()
        url = '/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequestV3.blank(url)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body)

    def test_user_quotas_update_exceed_project(self):
        body = {'quota_set': {'instances': 20}}
        url = '/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequestV3.blank(url, use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_user_quotas_delete_as_unauthorized_user(self):
        req = fakes.HTTPRequestV3.blank('/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, '1234')

    def test_user_quotas_delete_as_admin(self):
        context = context_maker.get_admin_context()
        url = '/os-quota-sets/1234?user_id=1'
        self.req = fakes.HTTPRequestV3.blank(url)
        self.req.environ['nova.context'] = context
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project_and_user")
        quota.QUOTAS.destroy_all_by_project_and_user(context, '1234', '1')
        self.mox.ReplayAll()
        self.controller.delete(self.req, '1234')
        self.mox.VerifyAll()
class QuotaXMLSerializerTest(test.TestCase):
    """Round-trip tests for the quota-set XML (de)serializers."""
    def setUp(self):
        super(QuotaXMLSerializerTest, self).setUp()
        self.serializer = quotas.QuotaTemplate()
        self.deserializer = wsgi.XMLDeserializer()
        self.detail_serializer = quotas.QuotaDetailTemplate()
    def test_serializer(self):
        # Serialize a flat quota set and verify every entry becomes a child
        # element; 'id' becomes an attribute of <quota_set>, hence len - 1.
        exemplar = dict(quota_set=dict(
            id='project_id',
            metadata_items=10,
            injected_file_path_bytes=255,
            injected_file_content_bytes=20,
            ram=50,
            floating_ips=60,
            fixed_ips=-1,
            instances=70,
            injected_files=80,
            security_groups=10,
            security_group_rules=20,
            key_pairs=100,
            cores=90))
        text = self.serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('quota_set', tree.tag)
        self.assertEqual('project_id', tree.get('id'))
        # 'id' is rendered as an attribute, not a child element.
        self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
        for child in tree:
            self.assertTrue(child.tag in exemplar['quota_set'])
            self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])
    def test_detail_serializer(self):
        # Detail form: each resource carries limit/in_use/reserved, which are
        # rendered as XML attributes on the per-resource child element.
        exemplar = dict(quota_set=dict(
            id='project_id',
            metadata_items=dict(limit=10, in_use=1, reserved=2),
            injected_file_path_bytes=dict(limit=255, in_use=25, reserved=1),
            injected_file_content_bytes=dict(limit=20, in_use=10, reserved=2),
            ram=dict(limit=30, in_use=10, reserved=3),
            floating_ips=dict(limit=60, in_use=20, reserved=20),
            fixed_ips=dict(limit=-1, in_use=20, reserved=0),
            instances=dict(limit=10, in_use=2, reserved=2),
            injected_files=dict(limit=80, in_use=20, reserved=30),
            security_groups=dict(limit=10, in_use=4, reserved=6),
            security_group_rules=dict(limit=20, in_use=10, reserved=8),
            key_pairs=dict(limit=20, in_use=10, reserved=11),
            cores=dict(limit=20, in_use=10, reserved=2),
        ))
        text = self.detail_serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('quota_set', tree.tag)
        self.assertEqual('project_id', tree.get('id'))
        # 'id' is rendered as an attribute, not a child element.
        self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
        for child in tree:
            self.assertTrue(child.tag in exemplar['quota_set'])
            for k in child.attrib.keys():
                self.assertEqual(int(child.attrib[k]),
                                 exemplar['quota_set'][child.tag][k])
    def test_deserializer(self):
        # Deserialized values stay strings; conversion to int happens later
        # in the controller, not in the XML deserializer.
        exemplar = dict(quota_set=dict(
            metadata_items='10',
            injected_file_content_bytes='20',
            ram='50',
            floating_ips='60',
            fixed_ips='-1',
            instances='70',
            injected_files='80',
            security_groups='10',
            security_group_rules='20',
            key_pairs='100',
            cores='90'))
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<quota_set>'
                  '<metadata_items>10</metadata_items>'
                  '<injected_file_content_bytes>20'
                  '</injected_file_content_bytes>'
                  '<ram>50</ram>'
                  '<floating_ips>60</floating_ips>'
                  '<fixed_ips>-1</fixed_ips>'
                  '<instances>70</instances>'
                  '<injected_files>80</injected_files>'
                  '<security_groups>10</security_groups>'
                  '<security_group_rules>20</security_group_rules>'
                  '<key_pairs>100</key_pairs>'
                  '<cores>90</cores>'
                  '</quota_set>')
        result = self.deserializer.deserialize(intext)['body']
        self.assertEqual(result, exemplar)
# Canned quota usage data shared by the "exceed in use" tests above.
# NOTE: test_quotas_force_update_exceed_in_used mutates the 'limit'
# entries in place.
fake_quotas = {
    'ram': {'limit': 51200, 'in_use': 12800, 'reserved': 12800},
    'cores': {'limit': 20, 'in_use': 10, 'reserved': 5},
    'instances': {'limit': 100, 'in_use': 0, 'reserved': 0},
}


def fake_get_quotas(self, context, id, user_id=None, usages=False):
    """Stub for QuotaSetsController._get_quotas returning canned data."""
    if not usages:
        # Limits-only view: collapse each resource to its 'limit' value.
        return {name: info['limit'] for name, info in fake_quotas.items()}
    return fake_quotas
| apache-2.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/google/protobuf/internal/descriptor_database_test.py | 75 | 3116 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_database."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
class DescriptorDatabaseTest(unittest.TestCase):
  """Tests for descriptor_database.DescriptorDatabase."""

  def testAdd(self):
    # Register the factory_test2 file descriptor, then verify it is found
    # both by file name and by every kind of symbol it declares: messages,
    # nested messages, enums, nested enums, and enums nested in messages
    # that contain nothing else.
    db = descriptor_database.DescriptorDatabase()
    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test2_pb2.DESCRIPTOR.serialized_pb)
    db.Add(file_desc_proto)
    self.assertEqual(file_desc_proto, db.FindFileByName(
        'google/protobuf/internal/factory_test2.proto'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Message'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Enum'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.MessageWithNestedEnumOnly.NestedEnum'))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| agpl-3.0 |
jigneshvasoya/ruffus | setup.py | 6 | 6508 | #!/usr/bin/env python
from distutils.core import setup
#from setuptools import find_packages
import sys, os
# Refuse to run on interpreters older than 2.6.  The check accepts 2.6
# itself (>=), so the message says "2.6 or later"; the original message
# ("later than 2.6") contradicted the actual condition.
if not sys.version_info[0:2] >= (2, 6):
    sys.stderr.write("Requires Python 2.6 or later\n")
    sys.exit(1)

# quickly import the latest version of ruffus: put the source tree first
# on sys.path so the version comes from this checkout, not an installed
# copy, then restore sys.path.
sys.path.insert(0, os.path.abspath("."))
import ruffus.ruffus_version
sys.path.pop(0)

module_dependencies = []
#module_dependencies = ['multiprocessing>=2.6', 'simplejson']
setup(
name='ruffus',
version=ruffus.ruffus_version.__version, #major.minor[.patch[.sub]]
description='Light-weight Python Computational Pipeline Management',
maintainer="Leo Goodstadt",
maintainer_email="ruffus_lib@llew.org.uk",
author='Leo Goodstadt',
author_email='ruffus@llew.org.uk',
long_description=\
"""
***************************************
Overview
***************************************
The Ruffus module is a lightweight way to add support
for running computational pipelines.
Computational pipelines are often conceptually quite simple, especially
if we breakdown the process into simple stages, or separate **tasks**.
Each stage or **task** in a computational pipeline is represented by a python function
Each python function can be called in parallel to run multiple **jobs**.
Ruffus was originally designed for use in bioinformatics to analyse multiple genome
data sets.
***************************************
Documentation
***************************************
Ruffus documentation can be found `here <http://www.ruffus.org.uk>`__ ,
with `download notes <http://www.ruffus.org.uk/installation.html>`__ ,
a `tutorial <http://www.ruffus.org.uk/tutorials/new_tutorial/introduction.html>`__ and
an `in-depth manual <http://www.ruffus.org.uk/tutorials/new_tutorial/manual_contents.html>`__ .
***************************************
Background
***************************************
The purpose of a pipeline is to determine automatically which parts of a multi-stage
process needs to be run and in what order in order to reach an objective ("targets")
Computational pipelines, especially for analysing large scientific datasets are
in widespread use.
However, even a conceptually simple series of steps can be difficult to set up and
maintain.
***************************************
Design
***************************************
The ruffus module has the following design goals:
* Lightweight
* Scalable / Flexible / Powerful
* Standard Python
* Unintrusive
* As simple as possible
***************************************
Features
***************************************
Automatic support for
* Managing dependencies
* Parallel jobs, including dispatching work to computational clusters
* Re-starting from arbitrary points, especially after errors (checkpointing)
* Display of the pipeline as a flowchart
* Managing complex pipeline topologies
***************************************
A Simple example
***************************************
Use the **@follows(...)** python decorator before the function definitions::
from ruffus import *
import sys
def first_task():
print "First task"
@follows(first_task)
def second_task():
print "Second task"
@follows(second_task)
def final_task():
print "Final task"
the ``@follows`` decorator indicate that the ``first_task`` function precedes ``second_task`` in
the pipeline.
The canonical Ruffus decorator is ``@transform`` which **transforms** data flowing down a
computational pipeline from one stage to teh next.
********
Usage
********
Each stage or **task** in a computational pipeline is represented by a python function
Each python function can be called in parallel to run multiple **jobs**.
1. Import module::
import ruffus
1. Annotate functions with python decorators
2. Print dependency graph if you necessary
- For a graphical flowchart in ``jpg``, ``svg``, ``dot``, ``png``, ``ps``, ``gif`` formats::
pipeline_printout_graph ("flowchart.svg")
This requires ``dot`` to be installed
- For a text printout of all jobs ::
pipeline_printout(sys.stdout)
3. Run the pipeline::
pipeline_run()
""",
url='http://www.ruffus.org.uk',
download_url = "https://pypi.python.org/pypi/ruffus",
install_requires = module_dependencies, #['multiprocessing>=1.0', 'json' ], #, 'python>=2.5'],
setup_requires = module_dependencies, #['multiprocessing>=1.0', 'json'], #, 'python>=2.5'],
classifiers=[
'Intended Audience :: End Users/Desktop',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: System :: Distributed Computing',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'Environment :: Console',
],
license = "MIT",
keywords = "make task pipeline parallel bioinformatics science",
#packages = find_packages('src'), # include all packages under src
#package_dir = {'':'src'}, #packages are under src
packages=['ruffus'],
package_dir={'ruffus': 'ruffus'},
include_package_data = True, # include everything in source control
#package_data = {
# # If any package contains *.txt files, include them:
# '': ['*.TXT'], \
#}
)
#
# http://pypi.python.org/pypi
# http://docs.python.org/distutils/packageindex.html
#
#
#
# python setup.py register
# python setup.py sdist --format=gztar upload
| mit |
GNS3/gns3-vm | ova.py | 1 | 1364 | #!/usr/bin/env python
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tool for manipulating OVA
"""
import sys
import tarfile
def view(path):
    """Print a human-readable dump of an OVA archive: first the list of
    member names, then the full text of every .ovf and .mf member."""
    with tarfile.open(path) as archive:
        members = archive.getmembers()
        print("=> Files in .ova:")
        for entry in members:
            print("* " + entry.name)
        print("")
        for entry in members:
            if entry.name.endswith((".ovf", ".mf")):
                print("=> Content of " + entry.name + ":")
                print(archive.extractfile(entry).read().decode("utf-8"))
                print("")
def main():
    """Entry point: dump the OVA file named on the command line."""
    # Exit with a usage message instead of a raw IndexError traceback
    # when no argument is supplied.
    if len(sys.argv) < 2:
        sys.exit("usage: {} <file.ova>".format(sys.argv[0]))
    view(sys.argv[1])


if __name__ == '__main__':
    main()
| gpl-3.0 |
nugget/home-assistant | homeassistant/components/esphome/fan.py | 3 | 3973 | """Support for ESPHome fans."""
import logging
from typing import List, Optional, TYPE_CHECKING
from homeassistant.components.esphome import EsphomeEntity, \
platform_async_setup_entry
from homeassistant.components.fan import FanEntity, SPEED_HIGH, SPEED_LOW, \
SPEED_MEDIUM, SUPPORT_OSCILLATE, SUPPORT_SET_SPEED, SPEED_OFF
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
if TYPE_CHECKING:
# pylint: disable=unused-import
from aioesphomeapi import FanInfo, FanState # noqa
DEPENDENCIES = ['esphome']
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistantType,
                            entry: ConfigEntry, async_add_entities) -> None:
    """Set up ESPHome fans based on a config entry."""
    # pylint: disable=redefined-outer-name
    # Imported here (not at module level) so the types are available at
    # setup time without making aioesphomeapi a hard import-time dependency.
    from aioesphomeapi import FanInfo, FanState  # noqa
    # Delegate to the shared ESPHome helper, which wires entity creation
    # (EsphomeFan) and state dispatch (FanState) for the 'fan' component.
    await platform_async_setup_entry(
        hass, entry, async_add_entities,
        component_key='fan',
        info_type=FanInfo, entity_type=EsphomeFan,
        state_type=FanState
    )
# Mapping from Home Assistant speed names to the integer speed levels used
# by the ESPHome fan API.  SPEED_OFF is intentionally absent: "off" is a
# state change, not a speed level (see EsphomeFan.async_set_speed).
FAN_SPEED_STR_TO_INT = {
    SPEED_LOW: 0,
    SPEED_MEDIUM: 1,
    SPEED_HIGH: 2
}
# Inverse mapping: ESPHome integer speed level -> Home Assistant name.
FAN_SPEED_INT_TO_STR = {v: k for k, v in FAN_SPEED_STR_TO_INT.items()}
class EsphomeFan(EsphomeEntity, FanEntity):
    """A fan implementation for ESPHome."""
    @property
    def _static_info(self) -> 'FanInfo':
        # Narrows the base-class return annotation for type checkers;
        # behavior is unchanged.
        return super()._static_info
    @property
    def _state(self) -> Optional['FanState']:
        # Narrows the base-class return annotation for type checkers.
        return super()._state
    async def async_set_speed(self, speed: str) -> None:
        """Set the speed of the fan."""
        if speed == SPEED_OFF:
            # ESPHome has no "off" speed level; turning off is a state
            # command, not a speed command.
            await self.async_turn_off()
            return
        await self._client.fan_command(
            self._static_info.key, speed=FAN_SPEED_STR_TO_INT[speed])
    async def async_turn_on(self, speed: Optional[str] = None,
                            **kwargs) -> None:
        """Turn on the fan."""
        if speed == SPEED_OFF:
            # Turning "on" at speed OFF is treated as a turn-off request.
            await self.async_turn_off()
            return
        data = {'key': self._static_info.key, 'state': True}
        if speed is not None:
            data['speed'] = FAN_SPEED_STR_TO_INT[speed]
        await self._client.fan_command(**data)
    # pylint: disable=arguments-differ
    async def async_turn_off(self, **kwargs) -> None:
        """Turn off the fan."""
        await self._client.fan_command(key=self._static_info.key, state=False)
    async def async_oscillate(self, oscillating: bool) -> None:
        """Oscillate the fan."""
        await self._client.fan_command(key=self._static_info.key,
                                       oscillating=oscillating)
    @property
    def is_on(self) -> Optional[bool]:
        """Return true if the entity is on."""
        # None means the device has not reported state yet.
        if self._state is None:
            return None
        return self._state.state
    @property
    def speed(self) -> Optional[str]:
        """Return the current speed."""
        if self._state is None:
            return None
        if not self._static_info.supports_speed:
            return None
        return FAN_SPEED_INT_TO_STR[self._state.speed]
    @property
    def oscillating(self) -> Optional[bool]:
        """Return the oscillation state."""
        if self._state is None:
            return None
        if not self._static_info.supports_oscillation:
            return None
        return self._state.oscillating
    @property
    def speed_list(self) -> Optional[List[str]]:
        """Get the list of available speeds."""
        if not self._static_info.supports_speed:
            return None
        return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        flags = 0
        if self._static_info.supports_oscillation:
            flags |= SUPPORT_OSCILLATE
        if self._static_info.supports_speed:
            flags |= SUPPORT_SET_SPEED
        return flags
| apache-2.0 |
FuturesFromYesterday/ardupilot | mk/PX4/Tools/genmsg/test/test_genmsg_msgs.py | 215 | 11646 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import random
def test_bare_msg_type():
    """bare_msg_type() strips any array suffix and passes None through."""
    import genmsg.msgs
    cases = [
        (None, None),
        ('String', 'String'),
        ('std_msgs/String', 'std_msgs/String'),
        ('String[10]', 'String'),
        ('string[10]', 'string'),
        ('std_msgs/String[10]', 'std_msgs/String'),
    ]
    for raw, expected in cases:
        assert genmsg.msgs.bare_msg_type(raw) == expected
PKG = 'genmsg'
def test_resolve_type():
    """resolve_type() leaves builtin types alone and expands relative and
    Header types to package-qualified names."""
    from genmsg.msgs import resolve_type
    # Builtin and builtin-array types must resolve to themselves.
    # BUG FIX: the original wrote ``t == resolve_type(t, PKG)`` without
    # ``assert``, so the comparison result was discarded and this check
    # never actually ran (it also bound an unused ``bt`` local).
    for t in ['string', 'string[]', 'string[14]', 'int32', 'int32[]']:
        assert t == resolve_type(t, PKG)
    # Already-qualified and special-cased types.
    assert 'foo/string' == resolve_type('foo/string', PKG)
    assert 'std_msgs/Header' == resolve_type('Header', 'roslib')
    assert 'std_msgs/Header' == resolve_type('std_msgs/Header', 'roslib')
    assert 'std_msgs/Header' == resolve_type('Header', 'stereo_msgs')
    assert 'std_msgs/String' == resolve_type('String', 'std_msgs')
    assert 'std_msgs/String' == resolve_type('std_msgs/String', 'std_msgs')
    assert 'std_msgs/String' == resolve_type('std_msgs/String', PKG)
    assert 'std_msgs/String[]' == resolve_type('std_msgs/String[]', PKG)
def test_parse_type():
    """parse_type() splits a field type into (base type, is_array, length)
    and raises ValueError on malformed input."""
    import genmsg.msgs
    tests = [
        ('a', ('a', False, None)),
        ('int8', ('int8', False, None)),
        ('std_msgs/String', ('std_msgs/String', False, None)),
        ('a[]', ('a', True, None)),
        ('int8[]', ('int8', True, None)),
        ('std_msgs/String[]', ('std_msgs/String', True, None)),
        ('a[1]', ('a', True, 1)),
        ('int8[1]', ('int8', True, 1)),
        ('std_msgs/String[1]', ('std_msgs/String', True, 1)),
        ('a[11]', ('a', True, 11)),
        ('int8[11]', ('int8', True, 11)),
        ('std_msgs/String[11]', ('std_msgs/String', True, 11)),
    ]
    for val, res in tests:
        assert res == genmsg.msgs.parse_type(val)
    # Malformed type strings must raise ValueError.  The unused ``as e``
    # binding is dropped, and the failure assert moved to the else-clause
    # so it cannot be confused with the expected exception path.
    fail = ['a[1][2]', 'a[][]', '', None, 'a[', 'a[[1]', 'a[1]]']
    for f in fail:
        try:
            genmsg.msgs.parse_type(f)
        except ValueError:
            pass  # expected
        else:
            assert False, "should have failed on %s" % f
def test_Constant():
    """Constant value object: attribute access, equality, and constructor
    validation (None must be rejected for every argument)."""
    import genmsg.msgs
    vals = [random.randint(0, 1000) for _ in range(0, 3)]
    type_, name, val = [str(v) for v in vals]
    x = genmsg.msgs.Constant(type_, name, val, str(val))
    assert type_ == x.type
    assert name == x.name
    assert val == x.val
    assert x == genmsg.msgs.Constant(type_, name, val, str(val))
    assert x != 1
    assert not x == 1
    assert x != genmsg.msgs.Constant('baz', name, val, str(val))
    assert x != genmsg.msgs.Constant(type_, 'foo', val, str(val))
    assert x != genmsg.msgs.Constant(type_, name, 'foo', 'foo')
    # tripwire
    assert repr(x)
    assert str(x)

    # BUG FIX: the original put ``assert False, "should have raised"``
    # inside a bare ``try/except: pass``, so the AssertionError itself was
    # swallowed and these checks could never fail.  The try/except/else
    # form below actually detects a missing exception.
    def expect_raises(fn, msg="should have raised"):
        try:
            fn()
        except Exception:
            pass  # expected failure path
        else:
            assert False, msg

    # Constructor must reject None for any argument.
    expect_raises(lambda: genmsg.msgs.Constant(None, name, val, str(val)))
    expect_raises(lambda: genmsg.msgs.Constant(type_, None, val, str(val)))
    expect_raises(lambda: genmsg.msgs.Constant(type_, name, None, 'None'))
    expect_raises(lambda: genmsg.msgs.Constant(type_, name, val, None))
    # Instances must not allow arbitrary attribute assignment.
    expect_raises(lambda: setattr(x, 'foo', 'bar'),
                  'Constant should not allow arbitrary attr assignment')
def test_MsgSpec():
    """Exercise MsgSpec construction, equality, fields(), constants and str()."""
    def sub_test_MsgSpec(types, names, constants, text, full_name, has_header):
        # Build the spec, verify every constructor argument round-trips, and
        # check equality against an identically constructed spec.
        m = MsgSpec(types, names, constants, text, full_name)
        assert m.types == types
        assert m.names == names
        assert m.text == text
        assert has_header == m.has_header()
        assert m.constants == constants
        assert list(zip(types, names)) == m.fields()
        assert m == MsgSpec(types, names, constants, text, full_name)
        return m
    from genmsg import MsgSpec, InvalidMsgSpec
    from genmsg.msgs import Field
    # don't allow duplicate fields
    try:
        MsgSpec(['int32', 'int64'], ['x', 'x'], [], 'int32 x\nint64 x', 'x/DupFields')
    except InvalidMsgSpec:
        pass
    else:
        assert False, "should have raised"
    # don't allow invalid fields
    try:
        MsgSpec(['string['], ['x'], [], 'int32 x\nint64 x', 'x/InvalidFields')
    except InvalidMsgSpec:
        pass
    else:
        assert False, "should have raised"
    # allow empty msg
    empty = sub_test_MsgSpec([], [], [], '', 'x/Nothing', False)
    assert [] == empty.fields()
    assert [] == empty.parsed_fields()
    assert 'x/Nothing' == empty.full_name
    assert 'x' == empty.package
    assert 'Nothing' == empty.short_name
    # one-field
    one_field = sub_test_MsgSpec(['int32'], ['x'], [], 'int32 x', 'x/OneInt', False)
    # make sure that equals tests every declared field
    assert one_field == MsgSpec(['int32'], ['x'], [], 'int32 x', 'x/OneInt')
    assert one_field != MsgSpec(['uint32'], ['x'], [], 'int32 x', 'x/OneInt')
    assert one_field != MsgSpec(['int32'], ['y'], [], 'int32 x', 'x/OneInt')
    assert one_field != MsgSpec(['int32'], ['x'], [], 'uint32 x', 'x/OneInt')
    assert one_field != MsgSpec(['int32'], ['x'], [], 'int32 x', 'x/OneIntBad')
    # test against __ne__ as well
    assert one_field != MsgSpec(['int32'], ['x'], [], 'uint32 x', 'x/OneInt')
    assert [Field('x', 'int32')] == one_field.parsed_fields(), "%s vs %s" % ([Field('x', 'int32')], one_field.parsed_fields())
    # test str
    assert "int32 x" == str(one_field).strip()
    # test variations of multiple fields and headers
    two_fields = sub_test_MsgSpec(['int32', 'string'], ['x', 'str'], [], 'int32 x\nstring str', 'x/TwoFields', False)
    assert [Field('x', 'int32'), Field('str', 'string')] == two_fields.parsed_fields()
    one_header = sub_test_MsgSpec(['std_msgs/Header'], ['header'], [], 'Header header', 'x/OneHeader', True)
    header_and_fields = sub_test_MsgSpec(['std_msgs/Header', 'int32', 'string'], ['header', 'x', 'str'], [], 'Header header\nint32 x\nstring str', 'x/HeaderAndFields', True)
    embed_types = sub_test_MsgSpec(['std_msgs/Header', 'std_msgs/Int32', 'string'], ['header', 'x', 'str'], [], 'Header header\nstd_msgs/Int32 x\nstring str', 'x/EmbedTypes', True)
    # test strify
    assert "int32 x\nstring str" == str(two_fields).strip()
    # Types/names length mismatch must be rejected.  The original check passed
    # only four arguments (full_name missing), its text contained '\i' (a typo
    # for '\n'), and the bare `except: pass` also swallowed the AssertionError,
    # turning the whole check into a no-op.  Fixed below.
    try:
        MsgSpec(['int32', 'int32'], ['intval'], [], 'int32 intval\nint32 y', 'x/Mismatch')
    except Exception:
        pass
    else:
        assert False, "types and names must align"
    # test (not) equals against non msgspec
    assert not (one_field == 1)
    assert one_field != 1
    # test constants
    from genmsg.msgs import Constant
    msgspec = MsgSpec(['int32'], ['x'], [Constant('int8', 'c', 1, '1')], 'int8 c=1\nuint32 x', 'x/Constants')
    assert msgspec.constants == [Constant('int8', 'c', 1, '1')]
    # tripwire: str/repr must not raise
    str(msgspec)
    repr(msgspec)
    # test that repr doesn't throw an error
    [repr(x) for x in [empty, one_field, one_header, two_fields, embed_types]]
def test_Field():
    """Field exposes the parsed type information of a single message field."""
    from genmsg.msgs import Field

    def check(field, type_, base_type, is_array, array_len, is_header, is_builtin):
        # Assert every parsed attribute of a Field in one place.
        assert field.type == type_
        assert field.base_type == base_type
        assert field.is_array == is_array
        assert field.array_len == array_len
        assert field.is_header == is_header
        assert field.is_builtin == is_builtin

    field = Field('foo', 'string')
    # equality compares name and type; comparing to other types is never equal
    assert field == Field('foo', 'string')
    assert field != Field('bar', 'string')
    assert field != Field('foo', 'int32')
    assert field != 1
    assert not field == 1
    assert field.name == 'foo'
    check(field, 'string', 'string', False, None, False, True)
    check(Field('foo', 'std_msgs/String'),
          'std_msgs/String', 'std_msgs/String', False, None, False, False)
    check(Field('foo', 'std_msgs/String[5]'),
          'std_msgs/String[5]', 'std_msgs/String', True, 5, False, False)
    check(Field('foo', 'std_msgs/String[]'),
          'std_msgs/String[]', 'std_msgs/String', True, None, False, False)
    header = Field('foo', 'std_msgs/Header')
    assert header.type == 'std_msgs/Header'
    assert header.is_header == True
    assert header.is_builtin == False
    header_array = Field('foo', 'std_msgs/Header[]')
    assert header_array.type == 'std_msgs/Header[]'
    # an array of Headers is not itself the special header field
    assert header_array.is_header == False
    # tripwire: repr must not raise
    repr(header_array)
def test_is_valid_msg_type():
    """is_valid_msg_type() accepts only well-formed (possibly array) type names."""
    from genmsg.msgs import is_valid_msg_type
    accepted = [
        # basic
        'F', 'f', 'Foo', 'Foo1',
        'std_msgs/String',
        # arrays
        'Foo[]', 'Foo[1]', 'Foo[10]',
    ]
    for candidate in accepted:
        assert is_valid_msg_type(candidate), \
            "genmsg.msgs.is_valid_msg_type should have returned True for '%s'" % candidate
    # bad cases
    rejected = [None, '', '#', '%', 'Foo%', 'Woo Woo',
                '/', '/String',
                'Foo[f]', 'Foo[1d]', 'Foo[-1]', 'Foo[1:10]', 'Foo[', 'Foo]', 'Foo[]Bar']
    for candidate in rejected:
        assert not is_valid_msg_type(candidate), \
            "genmsg.msgs.is_valid_msg_type should have returned False for '%s'" % candidate
def test_is_valid_constant_type():
    """Only primitive scalar types may be used for message constants."""
    from genmsg.msgs import is_valid_constant_type
    primitives = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                  'int64', 'uint64', 'float32', 'float64', 'char', 'byte',
                  'string']
    non_primitives = [
        'std_msgs/String', '/', 'String',
        'time', 'duration', 'header',
    ]
    for candidate in primitives:
        assert is_valid_constant_type(candidate), \
            "genmsg.msgs.is_valid_constant_type should have returned True for '%s'" % candidate
    for candidate in non_primitives:
        assert not is_valid_constant_type(candidate), \
            "genmsg.msgs.is_valid_constant_type should have returned False for '%s'" % candidate
| gpl-3.0 |
polyaxon/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_run_edge_kind.py | 1 | 3561 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1RunEdgeKind(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    # The bare string below is emitted by the generator as documentation for
    # the enum values; it is a no-op expression statement, not the docstring.
    """
    allowed enum values
    """
    ACTION = "action"
    EVENT = "event"
    HOOK = "hook"
    DAG = "dag"
    JOIN = "join"
    RUN = "run"
    TB = "tb"
    BUILD = "build"
    # every legal wire value for this enum, in declaration order
    allowable_values = [ACTION, EVENT, HOOK, DAG, JOIN, RUN, TB, BUILD]  # noqa: E501
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # This model carries no instance attributes, so both generated maps are
    # empty; to_dict() below consequently returns an empty dict.
    openapi_types = {
    }
    attribute_map = {
    }
    def __init__(self, local_vars_configuration=None):  # noqa: E501
        """V1RunEdgeKind - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # iterate the declared attributes, recursively serializing models
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1RunEdgeKind):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1RunEdgeKind):
            return True
        return self.to_dict() != other.to_dict()
| apache-2.0 |
oppo-source/Find7-5.0-kernel-source | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; report any unhandled events.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
		# Handler for the irq:softirq_entry tracepoint: print the common
		# columns, the uncommon context fields, then the symbolic name of
		# the softirq vector.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		# trailing comma: Python 2 print without a newline
		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
		# Handler for the kmem:kmalloc tracepoint: print the common columns
		# plus the allocation details, rendering gfp_flags symbolically.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		# trailing comma: Python 2 print without a newline
		print "call_site=%u, ptr=%u, bytes_req=%u, " \
		"bytes_alloc=%u, gfp_flags=%s\n" % \
		(call_site, ptr, bytes_req, bytes_alloc,
		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
	# Count events that have no specific handler.  `unhandled` is an
	# autodict: a missing key yields a nested autodict, so `+=` raises
	# TypeError on first use and we seed the counter with 1.
	try:
		unhandled[event_name] += 1
	except TypeError:
		unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Common columns shared by every event (trailing comma: stay on the line).
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
	# These fields are fetched back from perf through the context object
	# rather than being passed as handler arguments.
	print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
		% (common_pc(context), trace_flag_str(common_flags(context)), \
		common_lock_depth(context))
def print_unhandled():
	# Emit a small "event -> count" table for events with no handler;
	# silently return when every event was handled.
	keys = unhandled.keys()
	if not keys:
		return
	print "\nunhandled events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
				"-----------"),
	for event_name in keys:
		print "%-40s %10d\n" % (event_name, unhandled[event_name])
gjhiggins/RDFAlchemy | rdfalchemy/engine/__init__.py | 1 | 5337 | """
"""
import os
import re
import cgi
import urllib
def create_engine(url='', identifier="", create=False):
    """Open and return an rdflib ConjunctiveGraph for `url`.

    :returns: returns an opened rdflib ConjunctiveGraph
    :param url: a string of the url; the scheme selects the backend, e.g.:

      - create_engine('sleepycat://~/working/rdf_db')
      - create_engine('kyotocabinet://~/working/rdf_db')
      - create_engine('zodb:///var/rdflib/Data.fs')
      - create_engine('zodb://localhost:8672')
      - create_engine(
          'sesame://www.example.com:8080/openrdf-sesame/repositories/Test')
      - create_engine('sparql://www.example.com:2020/sparql')
    :param identifier: URIRef of the default context for writing
    :param create: create the backing store if it does not exist yet

    for zodb:
    the key in the Zope database is hardcoded as 'rdflib', urls ending in `.fs`
    indicate FileStorage, otherwise ClientStorage is assumed which requires
    a ZEO Server to be running

    for sqlalchemy, prepend the string "sqlalchemy+" to a valid SQLAlchemy dburi
    form:
      - create_engine('sqlalchemy+sqlite://')
      - create_engine('sqlalchemy+sqlite:////absolute/path/to/foo.db')
      - create_engine('sqlalchemy+mysql://myname@localhost/rdflibdb')
      - create_engine('sqlalchemy+postgresql://myname@localhost/rdflibdb')
    etc.

    :raises IOError: for a zodb FileStorage url whose file is missing and
        `create` is False
    :raises ValueError: when the url scheme is not recognized
    """
    if url == '' or url.startswith('IOMemory'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('IOMemory')
    elif url.lower().startswith('sleepycat://'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('Sleepycat', identifier=identifier)
        openstr = os.path.abspath(os.path.expanduser(url[12:]))
        db.open(openstr, create=create)
    elif url.lower().startswith('kyotocabinet://'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('Kyotocabinet', identifier=identifier)
        openstr = os.path.abspath(os.path.expanduser(url[15:]))
        db.open(openstr, create=create)
    elif url.lower().startswith('sqlalchemy+'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('SQLAlchemy', identifier=identifier)
        db.open(url[11:], create=create)
    elif url.lower().startswith('zodb://'):
        import ZODB
        # import transaction
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('ZODB')
        if url.endswith('.fs'):
            from ZODB.FileStorage import FileStorage
            openstr = os.path.abspath(os.path.expanduser(url[7:]))
            if not os.path.exists(openstr) and not create:
                # was `raise("File not found: ...")`: "raising" a plain
                # string is a TypeError, not a meaningful error
                raise IOError("File not found: %s" % openstr)
            fs = FileStorage(openstr)
        else:
            from ZEO.ClientStorage import ClientStorage
            schema, opts = _parse_rfc1738_args(url)
            fs = ClientStorage((opts['host'], int(opts['port'])))
        # get the Zope Database
        zdb = ZODB.DB(fs)
        # open it
        conn = zdb.open()
        # get the root
        root = conn.root()
        # get the Conjunctive Graph
        if 'rdflib' not in root and create:
            root['rdflib'] = ConjunctiveGraph('ZODB')
        db = root['rdflib']
    elif url.lower().startswith('sesame://'):
        from rdfalchemy.sparql.sesame2 import SesameGraph
        db = SesameGraph("http://" + url[9:])
    elif url.lower().startswith('sparql://'):
        from rdfalchemy.sparql import SPARQLGraph
        db = SPARQLGraph("http://" + url[9:])
    else:
        # was `raise "Could not parse string '%s'" % url`: string exceptions
        # were removed from Python; raise a proper ValueError instead
        raise ValueError("Could not parse string '%s'" % url)
    return db
def engine_from_config(configuration, prefix='rdfalchemy.', **kwargs):
    """Create a new Engine instance from a configuration dictionary.

    :param configuration: a dictionary, typically produced from a config file,
        whose relevant keys carry the given prefix (e.g. `rdfalchemy.dburi`).
    :param prefix: indicates the prefix to be searched for.

    Keyword arguments override values taken from the configuration; the
    stripped `dburi` entry selects the backend passed to create_engine().
    """
    options = {}
    for key in configuration:
        if key.startswith(prefix):
            options[key[len(prefix):]] = configuration[key]
    options.update(kwargs)
    dburi = options.pop('dburi')
    return create_engine(dburi, **options)
def _parse_rfc1738_args(name):
    """ parse url str into options
    code orig from sqlalchemy.engine.url """
    # scheme://user:password@host:port/database?query -- everything after the
    # scheme is optional (verbose regex, re.X)
    pattern = re.compile(r'''
            (\w+)://
            (?:
                ([^:/]*)
                (?::([^/]*))?
            @)?
            (?:
                ([^/:]*)
                (?::([^/]*))?
            )?
            (?:/(.*))?
            ''', re.X)
    m = pattern.match(name)
    if m is not None:
        # `name` is rebound to the scheme here; groups may be None
        (name, username, password, host, port, database) = m.group(
            1, 2, 3, 4, 5, 6)
        if database is not None:
            # split off an optional '?key=value&...' query string
            tokens = database.split(r"?", 2)
            database = tokens[0]
            query = (
                len(tokens) > 1 and dict(
                    cgi.parse_qsl(tokens[1])) or None)
            if query is not None:
                # NOTE(review): Python 2 idiom -- byte-encodes the keys;
                # cgi.parse_qsl and str.encode behave differently on Python 3
                query = dict([(k.encode('ascii'), query[k]) for k in query])
        else:
            query = None
        opts = {
            'username': username, 'password': password, 'host': host,
            'port': port, 'database': database, 'query': query}
        if opts['password'] is not None:
            # passwords may be url-quoted (urllib.unquote_plus is Python 2)
            opts['password'] = urllib.unquote_plus(opts['password'])
        return (name, opts)
    else:
        raise ValueError("Could not parse rfc1738 URL from string '%s'" % name)
| mit |
ranl/mta-exam-scraper | mta_exam_scraper/spiders/user_agent_generator.py | 1 | 1190 | # -*- coding: utf-8 -*-
"""
Create a list of user agents
scrapy crawl --output=/tmp/ran --output-format=csv user_agent_generator && sed -i '1d' /tmp/ran
"""
# python
import re
# Scrapy
import scrapy
# mta_exam_scraper
from mta_exam_scraper.items import UserAgent
class UserAgentGenerator(scrapy.Spider):
    """
    The Spider Class

    Crawls useragentstring.com and yields one UserAgent item per
    user-agent string found on the page.
    """
    name = 'user_agent_generator'
    allowed_domains = ['http://www.useragentstring.com/']
    # anchor texts at or below this length are too short to be real UA strings
    min_len = 20
    start_urls = ('http://www.useragentstring.com/pages/All/',)
    # hrefs matching any of these patterns are navigation links, not UA entries
    exclude = [
        re.compile('^/pages/.*\.php$'),
        re.compile('^/$'),
        re.compile('^http'),
    ]
    def parse(self, response):
        # Walk every anchor on the page; yield its text as a UserAgent item
        # unless the text is too short or the href matches an exclusion rule.
        for a in response.xpath('//a'):
            try:
                ua = a.xpath('text()').extract()[0].strip().strip('"')
            except Exception:
                # anchor with no extractable text
                continue
            if len(ua) <= self.min_len:
                continue
            for r in self.exclude:
                try:
                    if r.match(a.xpath('@href').extract()[0]):
                        break
                except Exception:
                    pass
            else:
                # for/else: no exclusion pattern matched -- keep this UA
                yield UserAgent(name=ua)
| apache-2.0 |
vmax-feihu/hue | desktop/core/ext-py/kazoo-2.0/kazoo/interfaces.py | 54 | 6363 | """Kazoo Interfaces
.. versionchanged:: 1.4
The classes in this module used to be interface declarations based on
`zope.interface.Interface`. They were converted to normal classes and
now serve as documentation only.
"""
# public API
class IHandler(object):
    """A Callback Handler for Zookeeper completion and watch callbacks.
    This object must implement several methods responsible for
    determining how completion / watch callbacks are handled as well as
    the method for calling :class:`IAsyncResult` callback functions.
    These functions are used to abstract differences between a Python
    threading environment and asynchronous single-threaded environments
    like gevent. The minimum functionality needed for Kazoo to handle
    these differences is encompassed in this interface.
    The Handler should document how callbacks are called for:
    * Zookeeper completion events
    * Zookeeper watch events
    .. attribute:: name
        Human readable name of the Handler interface.
    .. attribute:: timeout_exception
        Exception class that should be thrown and captured if a
        result is not available within the given time.
    .. attribute:: sleep_func
        Appropriate sleep function that can be called with a single
        argument and sleep.
    """
    # NOTE: every method below is a documentation-only stub -- its body is
    # just the docstring (implicitly returning None).  Concrete handlers
    # supply the real implementations.
    def start(self):
        """Start the handler, used for setting up the handler."""
    def stop(self):
        """Stop the handler. Should block until the handler is safely
        stopped."""
    def select(self):
        """A select method that implements Python's select.select
        API"""
    def socket(self):
        """A socket method that implements Python's socket.socket
        API"""
    def create_connection(self):
        """A socket method that implements Python's
        socket.create_connection API"""
    def event_object(self):
        """Return an appropriate object that implements Python's
        threading.Event API"""
    def lock_object(self):
        """Return an appropriate object that implements Python's
        threading.Lock API"""
    def rlock_object(self):
        """Return an appropriate object that implements Python's
        threading.RLock API"""
    def async_result(self):
        """Return an instance that conforms to the
        :class:`~IAsyncResult` interface appropriate for this
        handler"""
    def spawn(self, func, *args, **kwargs):
        """Spawn a function to run asynchronously
        :param args: args to call the function with.
        :param kwargs: keyword args to call the function with.
        This method should return immediately and execute the function
        with the provided args and kwargs in an asynchronous manner.
        """
    def dispatch_callback(self, callback):
        """Dispatch to the callback object
        :param callback: A :class:`~kazoo.protocol.states.Callback`
            object to be called.
        """
class IAsyncResult(object):
    """An Async Result object that can be queried for a value that has
    been set asynchronously.
    This object is modeled on the ``gevent`` AsyncResult object.
    The implementation must account for the fact that the :meth:`set`
    and :meth:`set_exception` methods will be called from within the
    Zookeeper thread which may require extra care under asynchronous
    environments.
    .. attribute:: value
        Holds the value passed to :meth:`set` if :meth:`set` was
        called. Otherwise `None`.
    .. attribute:: exception
        Holds the exception instance passed to :meth:`set_exception`
        if :meth:`set_exception` was called. Otherwise `None`.
    """
    # NOTE: every method below is a documentation-only stub -- its body is
    # just the docstring.  Handler implementations provide the behavior.
    def ready(self):
        """Return `True` if and only if it holds a value or an
        exception"""
    def successful(self):
        """Return `True` if and only if it is ready and holds a
        value"""
    def set(self, value=None):
        """Store the value. Wake up the waiters.
        :param value: Value to store as the result.
        Any waiters blocking on :meth:`get` or :meth:`wait` are woken
        up. Sequential calls to :meth:`wait` and :meth:`get` will not
        block at all."""
    def set_exception(self, exception):
        """Store the exception. Wake up the waiters.
        :param exception: Exception to raise when fetching the value.
        Any waiters blocking on :meth:`get` or :meth:`wait` are woken
        up. Sequential calls to :meth:`wait` and :meth:`get` will not
        block at all."""
    def get(self, block=True, timeout=None):
        """Return the stored value or raise the exception
        :param block: Whether this method should block or return
            immediately.
        :type block: bool
        :param timeout: How long to wait for a value when `block` is
            `True`.
        :type timeout: float
        If this instance already holds a value / an exception, return /
        raise it immediately. Otherwise, block until :meth:`set` or
        :meth:`set_exception` has been called or until the optional
        timeout occurs."""
    def get_nowait(self):
        """Return the value or raise the exception without blocking.
        If nothing is available, raise the Timeout exception class on
        the associated :class:`IHandler` interface."""
    def wait(self, timeout=None):
        """Block until the instance is ready.
        :param timeout: How long to wait for a value when `block` is
            `True`.
        :type timeout: float
        If this instance already holds a value / an exception, return /
        raise it immediately. Otherwise, block until :meth:`set` or
        :meth:`set_exception` has been called or until the optional
        timeout occurs."""
    def rawlink(self, callback):
        """Register a callback to call when a value or an exception is
        set
        :param callback:
            A callback function to call after :meth:`set` or
            :meth:`set_exception` has been called. This function will
            be passed a single argument, this instance.
        :type callback: func
        """
    def unlink(self, callback):
        """Remove the callback set by :meth:`rawlink`
        :param callback: A callback function to remove.
        :type callback: func
        """
| apache-2.0 |
katiecheng/Bombolone | env/lib/python2.7/site-packages/gridfs/errors.py | 16 | 1602 | # Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by the :mod:`gridfs` package"""
from pymongo.errors import PyMongoError
class GridFSError(PyMongoError):
    """Base class for all GridFS exceptions.

    Catch this to handle any GridFS-related failure.

    .. versionadded:: 1.5
    """
class CorruptGridFile(GridFSError):
    """Raised when a file in :class:`~gridfs.GridFS` is malformed
    (e.g. missing or inconsistent chunk data).
    """
class NoFile(GridFSError):
    """Raised when trying to read from a non-existent file.

    .. versionadded:: 1.6
    """
class FileExists(GridFSError):
    """Raised when trying to create a file that already exists.

    .. versionadded:: 1.7
    """
class UnsupportedAPI(GridFSError):
    """Raised when trying to use the old GridFS API.

    In version 1.6 of the PyMongo distribution there were backwards
    incompatible changes to the GridFS API. Upgrading shouldn't be
    difficult, but the old API is no longer supported (with no
    deprecation period). This exception will be raised when attempting
    to use unsupported constructs from the old API.

    .. versionadded:: 1.6
    """
gdementen/numexpr-numba | bench/poly.py | 1 | 1861 | ###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
#######################################################################
# This script compares the speed of the computation of a polynomial
# for different (numpy and numexpr) in-memory paradigms.
#
# Author: Francesc Alted
# Date: 2010-07-06
#######################################################################
import sys
from time import time
import numpy as np
import numexpr as ne
expr = ".25*x**3 + .75*x**2 - 1.5*x - 2"  # the polynomial to compute (naive form)
expr = "((.25*x + .75)*x - 1.5)*x - 2"  # Horner form of the same polynomial;
                                        # rebinds expr, so only this one is used
N = 10*1000*1000              # the number of points to compute expression
x = np.linspace(-1, 1, N)     # the x in range [-1, 1]
#what = "numpy"               # uses numpy for computations
what = "numexpr"              # uses numexpr for computations (argv may override)
def compute():
    """Evaluate the module-level polynomial over ``x`` and return its length.

    Uses plain Python ``eval`` when ``what`` is "numpy", and numexpr's
    compiled evaluator otherwise.
    """
    result = eval(expr) if what == "numpy" else ne.evaluate(expr)
    return len(result)
if __name__ == '__main__':
    # CLI: poly.py [numpy|numexpr] [nthreads]
    if len(sys.argv) > 1:  # first arg is the package to use
        what = sys.argv[1]
    if len(sys.argv) > 2:  # second arg is the number of threads to use
        nthreads = int(sys.argv[2])
        if "ncores" in dir(ne):
            # older numexpr builds have no thread control; guard on 'ncores'
            ne.set_num_threads(nthreads)
    if what not in ("numpy", "numexpr"):
        print "Unrecognized module:", what
        sys.exit(0)
    print "Computing: '%s' using %s with %d points" % (expr, what, N)
    # time a single evaluation of the polynomial
    t0 = time()
    result = compute()
    ts = round(time() - t0, 3)
    print "*** Time elapsed:", ts
| mit |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/sched.py | 47 | 5541 | """A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are be packed in a sequence).
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
class Event(namedtuple('Event', 'time, priority, action, argument')):
    """A scheduled event.

    Comparisons use only (time, priority) — the action and argument
    fields never participate in ordering or equality, so two events
    scheduled for the same moment with the same priority compare equal.
    """
    def _order_key(self):
        # ordering deliberately ignores action/argument
        return (self.time, self.priority)
    def __eq__(self, other):
        return self._order_key() == other._order_key()
    def __ne__(self, other):
        return self._order_key() != other._order_key()
    def __lt__(self, other):
        return self._order_key() < other._order_key()
    def __le__(self, other):
        return self._order_key() <= other._order_key()
    def __gt__(self, other):
        return self._order_key() > other._order_key()
    def __ge__(self, other):
        return self._order_key() >= other._order_key()
class scheduler:
    def __init__(self, timefunc, delayfunc):
        """Initialize a new instance, passing the time and delay
        functions"""
        self._queue = []  # min-heap of Event tuples, maintained via heapq
        self.timefunc = timefunc
        self.delayfunc = delayfunc
    def enterabs(self, time, priority, action, argument):
        """Enter a new event in the queue at an absolute time.
        Returns an ID for the event which can be used to remove it,
        if necessary.
        """
        event = Event(time, priority, action, argument)
        heapq.heappush(self._queue, event)
        return event # The ID
    def enter(self, delay, priority, action, argument):
        """A variant that specifies the time as a relative time.
        This is actually the more commonly used interface.
        """
        time = self.timefunc() + delay
        return self.enterabs(time, priority, action, argument)
    def cancel(self, event):
        """Remove an event from the queue.
        This must be presented the ID as returned by enter().
        If the event is not in the queue, this raises ValueError.
        """
        self._queue.remove(event)
        # removal from the middle breaks the heap invariant; rebuild it
        heapq.heapify(self._queue)
    def empty(self):
        """Check whether the queue is empty."""
        return not self._queue
    def run(self):
        """Execute events until the queue is empty.
        When there is a positive delay until the first event, the
        delay function is called and the event is left in the queue;
        otherwise, the event is removed from the queue and executed
        (its action function is called, passing it the argument).  If
        the delay function returns prematurely, it is simply
        restarted.
        It is legal for both the delay function and the action
        function to modify the queue or to raise an exception;
        exceptions are not caught but the scheduler's state remains
        well-defined so run() may be called again.
        A questionable hack is added to allow other threads to run:
        just after an event is executed, a delay of 0 is executed, to
        avoid monopolizing the CPU when other threads are also
        runnable.
        """
        # localize variable access to minimize overhead
        # and to improve thread safety
        q = self._queue
        delayfunc = self.delayfunc
        timefunc = self.timefunc
        pop = heapq.heappop
        while q:
            # peek at the earliest event without removing it yet
            time, priority, action, argument = checked_event = q[0]
            now = timefunc()
            if now < time:
                delayfunc(time - now)
            else:
                event = pop(q)
                # Verify that the event was not removed or altered
                # by another thread after we last looked at q[0].
                if event is checked_event:
                    action(*argument)
                    delayfunc(0)   # Let other threads run
                else:
                    heapq.heappush(q, event)
    @property
    def queue(self):
        """An ordered list of upcoming events.
        Events are named tuples with fields for:
            time, priority, action, arguments
        """
        # Use heapq to sort the queue rather than using 'sorted(self._queue)'.
        # With heapq, two events scheduled at the same time will show in
        # the actual order they would be retrieved.
        events = self._queue[:]
        return map(heapq.heappop, [events]*len(events))
| apache-2.0 |
walterreade/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
# Expected cluster count for the shared synthetic fixture; most tests below
# compare the number of clusters they recover against this value.
n_clusters = 3
# Shared feature matrix of synthetic clustered data (one blob per cluster,
# presumably -- see sklearn.cluster.tests.common.generate_clustered_data).
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
    # DBSCAN on a precomputed, normalised distance matrix should recover the
    # expected number of clusters via both the functional and the estimator
    # interfaces.  Parameters chosen specifically for this task.
    eps, min_samples = 0.15, 10
    # Pairwise distances, rescaled into [0, 1].
    dist_matrix = distance.squareform(distance.pdist(X))
    dist_matrix /= np.max(dist_matrix)
    # Functional interface.
    _, labels_fn = dbscan(dist_matrix, metric="precomputed", eps=eps,
                          min_samples=min_samples)
    # Cluster count, discounting the noise label (-1) if present.
    assert_equal(len(set(labels_fn)) - int(-1 in labels_fn), n_clusters)
    # Estimator interface.
    estimator = DBSCAN(metric="precomputed", eps=eps,
                       min_samples=min_samples)
    labels_est = estimator.fit(dist_matrix).labels_
    assert_equal(len(set(labels_est)) - int(-1 in labels_est), n_clusters)
def test_dbscan_feature():
    # DBSCAN on a raw feature matrix with the euclidean metric should recover
    # the expected cluster count via both interfaces.  A larger eps than in
    # the similarity test is needed because distances are not normalised.
    eps, min_samples, metric = 0.8, 10, 'euclidean'
    # Functional interface.
    _, labels_fn = dbscan(X, metric=metric, eps=eps,
                          min_samples=min_samples)
    assert_equal(len(set(labels_fn)) - int(-1 in labels_fn), n_clusters)
    # Estimator interface.
    labels_est = DBSCAN(metric=metric, eps=eps,
                        min_samples=min_samples).fit(X).labels_
    assert_equal(len(set(labels_est)) - int(-1 in labels_est), n_clusters)
def test_dbscan_sparse():
    # Sparse and dense representations of the same data must yield identical
    # core samples and labels.
    dense_core, dense_labels = dbscan(X, eps=.8, min_samples=10)
    sparse_core, sparse_labels = dbscan(sparse.lil_matrix(X), eps=.8,
                                        min_samples=10)
    assert_array_equal(dense_core, sparse_core)
    assert_array_equal(dense_labels, sparse_labels)
def test_dbscan_sparse_precomputed():
    # A sparse precomputed neighborhood graph must give the same clustering
    # as the full dense distance matrix.
    dense_D = pairwise_distances(X)
    neighbors = NearestNeighbors(radius=.9).fit(X)
    sparse_D = neighbors.radius_neighbors_graph(mode='distance')
    # Ensure it is sparse not merely on diagonals:
    assert sparse_D.nnz < dense_D.shape[0] * (dense_D.shape[0] - 1)
    sparse_core, sparse_labels = dbscan(sparse_D, eps=.8, min_samples=10,
                                        metric='precomputed')
    dense_core, dense_labels = dbscan(dense_D, eps=.8, min_samples=10,
                                      metric='precomputed')
    assert_array_equal(dense_core, sparse_core)
    assert_array_equal(dense_labels, sparse_labels)
def test_dbscan_no_core_samples():
    # When min_samples exceeds every neighborhood size, all points are noise:
    # no components, every label -1, empty core-sample index array.  Checked
    # for both dense and sparse input.
    rng = np.random.RandomState(0)
    data = rng.rand(40, 10)
    data[data < .8] = 0
    for variant in (data, sparse.csr_matrix(data)):
        est = DBSCAN(min_samples=6).fit(variant)
        assert_array_equal(est.components_,
                           np.empty((0, variant.shape[1])))
        assert_array_equal(est.labels_, -1)
        assert_equal(est.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
    # A callable metric (rather than a string name) must work with the
    # tree-based algorithms and recover the expected cluster count.
    # Different eps to other tests, because distance is not normalised.
    eps, min_samples = 0.8, 10
    # The function reference itself, not the 'euclidean' string key.
    metric = distance.euclidean
    # Functional interface.
    _, labels_fn = dbscan(X, metric=metric, eps=eps,
                          min_samples=min_samples, algorithm='ball_tree')
    assert_equal(len(set(labels_fn)) - int(-1 in labels_fn), n_clusters)
    # Estimator interface.
    labels_est = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
                        algorithm='ball_tree').fit(X).labels_
    assert_equal(len(set(labels_est)) - int(-1 in labels_est), n_clusters)
def test_dbscan_balltree():
    # Tree-based neighbor searches (ball_tree / kd_tree, various p and
    # leaf_size settings) must all recover the same cluster count as the
    # precomputed-distance reference run.
    eps, min_samples = 0.8, 10

    def found_clusters(labels):
        # Cluster count, ignoring the noise label if present.
        return len(set(labels)) - int(-1 in labels)

    D = pairwise_distances(X)
    _, labels = dbscan(D, metric="precomputed", eps=eps,
                       min_samples=min_samples)
    assert_equal(found_clusters(labels), n_clusters)

    for extra in ({'p': 2.0, 'algorithm': 'ball_tree'},
                  {'p': 2.0, 'algorithm': 'kd_tree'},
                  {'p': 1.0, 'algorithm': 'ball_tree'},
                  {'leaf_size': 20, 'algorithm': 'ball_tree'}):
        est = DBSCAN(eps=eps, min_samples=min_samples, **extra)
        assert_equal(found_clusters(est.fit(X).labels_), n_clusters)
def test_input_validation():
    # DBSCAN.fit must accept a plain Python list of lists.
    data = [[1., 2.], [3., 4.]]
    DBSCAN().fit(data)  # must not raise exception
def test_dbscan_badargs():
    # Each of these invalid parameter values must raise a ValueError.
    for bad_kwargs in ({'eps': -1.0},
                       {'algorithm': 'blah'},
                       {'metric': 'blah'},
                       {'leaf_size': -1},
                       {'p': -1}):
        assert_raises(ValueError, dbscan, X, **bad_kwargs)
def test_pickle():
    # A round-trip through pickle must preserve the estimator's class.
    original = DBSCAN()
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
def test_boundaries():
    # min_samples counts the core point itself ...
    cores, _ = dbscan([[0], [1]], eps=2, min_samples=2)
    assert_in(0, cores)
    # ... and eps includes points exactly on the circumference,
    cores, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
    assert_in(0, cores)
    # ... but nothing strictly beyond it.
    cores, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
    assert_not_in(0, cores)
def test_weighted_dbscan():
    # Exercises the sample_weight parameter: validation, its effect on which
    # points become core samples, and its equivalence to repeating samples.
    # ensure sample_weight is validated (wrong length must raise)
    assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
    assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
    # ensure sample_weight has an effect: a point is core when the summed
    # weight of its neighborhood (including itself) reaches min_samples
    assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
                                  min_samples=6)[0])
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
                                  min_samples=6)[0])
    assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
                                   min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
                                      min_samples=6)[0])
    # points within eps of each other: weights accumulate across neighbors
    assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
                                      sample_weight=[5, 1], min_samples=6)[0])
    # and effect of non-positive and non-integer sample_weight:
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
                                  eps=1.5, min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
                                      eps=1.5, min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
                                      eps=1.5, min_samples=6)[0])
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
                                  eps=1.5, min_samples=6)[0])
    # for non-negative sample_weight, cores should be identical to repetition
    # of each sample `weight` times
    rng = np.random.RandomState(42)
    sample_weight = rng.randint(0, 5, X.shape[0])
    core1, label1 = dbscan(X, sample_weight=sample_weight)
    assert_equal(len(label1), len(X))
    X_repeated = np.repeat(X, sample_weight, axis=0)
    core_repeated, label_repeated = dbscan(X_repeated)
    # compare core membership as boolean masks so zero-weight samples line up
    core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
    core_repeated_mask[core_repeated] = True
    core_mask = np.zeros(X.shape[0], dtype=bool)
    core_mask[core1] = True
    assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
    # sample_weight should work with precomputed distance matrix
    D = pairwise_distances(X)
    core3, label3 = dbscan(D, sample_weight=sample_weight,
                           metric='precomputed')
    assert_array_equal(core1, core3)
    assert_array_equal(label1, label3)
    # sample_weight should work with estimator (fit and fit_predict)
    est = DBSCAN().fit(X, sample_weight=sample_weight)
    core4 = est.core_sample_indices_
    label4 = est.labels_
    assert_array_equal(core1, core4)
    assert_array_equal(label1, label4)
    est = DBSCAN()
    label5 = est.fit_predict(X, sample_weight=sample_weight)
    core5 = est.core_sample_indices_
    assert_array_equal(core1, core5)
    assert_array_equal(label1, label5)
    assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
    # 1-D toy data: a dense run (2, 3, 4) plus progressively isolated points.
    # Raising min_samples peels away core samples step by step, identically
    # for every neighbor-search algorithm.
    data = [[0], [2], [3], [4], [6], [8], [10]]
    n_samples = len(data)
    for algorithm in ['brute', 'kd_tree', 'ball_tree']:
        # Degenerate case: every sample is a core sample, either with its own
        # cluster or including other close core samples.
        cores, labels = dbscan(data, algorithm=algorithm, eps=1,
                               min_samples=1)
        assert_array_equal(cores, np.arange(n_samples))
        assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
        # min_samples=2: only the 3 samples from the denser area are core.
        # All other points are isolated and considered noise.
        cores, labels = dbscan(data, algorithm=algorithm, eps=1,
                               min_samples=2)
        assert_array_equal(cores, [1, 2, 3])
        assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # min_samples=3: only the middle of the dense area is core; its two
        # neighbors are edge samples, the rest is noise.
        cores, labels = dbscan(data, algorithm=algorithm, eps=1,
                               min_samples=3)
        assert_array_equal(cores, [2])
        assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # min_samples=4: no core samples can be extracted with eps=1;
        # everything is noise.
        cores, labels = dbscan(data, algorithm=algorithm, eps=1,
                               min_samples=4)
        assert_array_equal(cores, [])
        assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
    # Regression test for scikit-learn issue #4641: degenerate precomputed
    # matrices (identity, all-zeros) must yield a single cluster rather than
    # crashing.  See
    # https://github.com/scikit-learn/scikit-learn/issues/4641 for details.
    for degenerate in (np.eye(10), np.zeros((10, 10))):
        labels = DBSCAN(eps=0.5, metric='precomputed').fit(degenerate).labels_
        assert_equal(len(set(labels)), 1)
| bsd-3-clause |
endlessm/chromium-browser | third_party/catapult/third_party/oauth2client/oauth2client/util.py | 60 | 5704 | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
__author__ = [
'rafek@google.com (Rafe Kaplan)',
'guido@google.com (Guido van Rossum)',
]
__all__ = [
'positional',
'POSITIONAL_WARNING',
'POSITIONAL_EXCEPTION',
'POSITIONAL_IGNORE',
]
import functools
import inspect
import logging
import sys
import types
import six
from six.moves import urllib
logger = logging.getLogger(__name__)
# Enforcement modes for the positional() decorator: log a warning, raise a
# TypeError, or silently ignore a violated declaration.
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
# The set of valid enforcement mode values.
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
                            POSITIONAL_IGNORE])
# Module-wide behavior applied when a positional() declaration is violated.
positional_parameters_enforcement = POSITIONAL_WARNING
def positional(max_positional_args):
    """A decorator to declare that only the first N arguments may be positional.

    This decorator makes it easy to support Python 3 style keyword-only
    parameters. For example, in Python 3 it is possible to write::

        def fn(pos1, *, kwonly1=None, kwonly2=None):
            ...

    All named parameters after ``*`` must be a keyword::

        fn(10, 'kw1', 'kw2')  # Raises exception.
        fn(10, kwonly1='kw1')  # Ok.

    Example
    ^^^^^^^
    To define a function like above, do::

        @positional(1)
        def fn(pos1, kwonly1=None, kwonly2=None):
            ...

    If no default value is provided to a keyword argument, it becomes a
    required keyword argument::

        @positional(0)
        def fn(required_kw):
            ...

    This must be called with the keyword parameter::

        fn()  # Raises exception.
        fn(10)  # Raises exception.
        fn(required_kw=10)  # Ok.

    When defining instance or class methods always remember to account for
    ``self`` and ``cls``::

        class MyClass(object):

            @positional(2)
            def my_method(self, pos1, kwonly1=None):
                ...

            @classmethod
            @positional(2)
            def my_method(cls, pos1, kwonly1=None):
                ...

    The positional decorator behavior is controlled by
    ``util.positional_parameters_enforcement``, which may be set to
    ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
    ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
    nothing, respectively, if a declaration is violated.

    Args:
        max_positional_args: Maximum number of positional arguments. All
            parameters after this index must be keyword only. May also be
            the decorated function itself, when ``@positional`` is used as
            a bare decorator; the limit is then inferred from the
            function's signature.

    Returns:
        A decorator that prevents using arguments after max_positional_args
        from being used as positional parameters.

    Raises:
        TypeError if a keyword-only argument is provided as a positional
        parameter, but only if util.positional_parameters_enforcement is
        set to POSITIONAL_EXCEPTION.
    """
    def positional_decorator(wrapped):
        @functools.wraps(wrapped)
        def positional_wrapper(*args, **kwargs):
            if len(args) > max_positional_args:
                plural_s = ''
                if max_positional_args != 1:
                    plural_s = 's'
                message = '%s() takes at most %d positional argument%s (%d given)' % (
                    wrapped.__name__, max_positional_args, plural_s, len(args))
                # React according to the module-wide enforcement policy.
                if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
                    raise TypeError(message)
                elif positional_parameters_enforcement == POSITIONAL_WARNING:
                    logger.warning(message)
                else:  # IGNORE
                    pass
            return wrapped(*args, **kwargs)
        return positional_wrapper

    if isinstance(max_positional_args, six.integer_types):
        return positional_decorator
    else:
        # Used as a bare @positional decorator: infer the positional limit
        # from the wrapped function's own signature (every parameter with a
        # default value becomes keyword-only).
        # inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec() when available and fall back only on Python 2.
        get_spec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        spec = get_spec(max_positional_args)
        # spec[3] (defaults) is None when no parameter has a default; treat
        # that as an empty tuple instead of crashing on len(None).
        args, defaults = spec[0], spec[3] or ()
        return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
    """Convert a scopes value to a single space-delimited string.

    A plain string passes through unchanged; any other iterable of scope
    strings is joined with single spaces.

    Args:
        scopes: string or iterable of strings, the scopes.

    Returns:
        The scopes formatted as a single string.
    """
    if isinstance(scopes, six.string_types):
        return scopes
    return ' '.join(scopes)
def dict_to_tuple_key(dictionary):
    """Build an immutable, hashable key out of a dictionary.

    The (key, value) pairs are sorted first, so two logically equivalent
    dictionaries always map to the identical tuple.

    Args:
        dictionary: the dictionary to use as the key.

    Returns:
        A tuple of the dictionary's items in their naturally sorted order.
    """
    sorted_items = sorted(dictionary.items())
    return tuple(sorted_items)
def _add_query_parameter(url, name, value):
    """Return *url* with the query parameter ``name=value`` added.

    Replaces the current value if the parameter already exists in the URL.

    Args:
        url: string, url to add the query parameter to.
        name: string, query parameter name.
        value: string, query parameter value.

    Returns:
        The updated url, or the original url unchanged if value is None.
    """
    if value is None:
        return url
    parts = list(urllib.parse.urlparse(url))
    query = dict(urllib.parse.parse_qsl(parts[4]))
    query[name] = value
    parts[4] = urllib.parse.urlencode(query)
    return urllib.parse.urlunparse(parts)
| bsd-3-clause |
alxgu/ansible | lib/ansible/modules/network/fortios/fortios_log_fortianalyzer3_setting.py | 24 | 13194 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortianalyzer3_setting
short_description: Global FortiAnalyzer settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_fortianalyzer3 feature and setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_fortianalyzer3_setting:
description:
- Global FortiAnalyzer settings.
default: null
suboptions:
__change_ip:
description:
- Hidden attribute.
certificate:
description:
- Certificate used to communicate with FortiAnalyzer. Source certificate.local.name.
conn-timeout:
description:
- FortiAnalyzer connection time-out in seconds (for status and log buffer).
enc-algorithm:
description:
- Enable/disable sending FortiAnalyzer log data with SSL encryption.
choices:
- high-medium
- high
- low
- disable
faz-type:
description:
- Hidden setting index of FortiAnalyzer.
hmac-algorithm:
description:
- FortiAnalyzer IPsec tunnel HMAC algorithm.
choices:
- sha256
- sha1
ips-archive:
description:
- Enable/disable IPS packet archive logging.
choices:
- enable
- disable
mgmt-name:
description:
- Hidden management name of FortiAnalyzer.
monitor-failure-retry-period:
description:
- Time between FortiAnalyzer connection retries in seconds (for status and log buffer).
monitor-keepalive-period:
description:
- Time between OFTP keepalives in seconds (for status and log buffer).
reliable:
description:
- Enable/disable reliable logging to FortiAnalyzer.
choices:
- enable
- disable
server:
description:
- The remote FortiAnalyzer.
source-ip:
description:
- Source IPv4 or IPv6 address used to communicate with FortiAnalyzer.
ssl-min-proto-version:
description:
- Minimum supported protocol version for SSL/TLS connections (default is to follow system global setting).
choices:
- default
- SSLv3
- TLSv1
- TLSv1-1
- TLSv1-2
status:
description:
- Enable/disable logging to FortiAnalyzer.
choices:
- enable
- disable
upload-day:
description:
- Day of week (month) to upload logs.
upload-interval:
description:
- Frequency to upload log files to FortiAnalyzer.
choices:
- daily
- weekly
- monthly
upload-option:
description:
- Enable/disable logging to hard disk and then uploading to FortiAnalyzer.
choices:
- store-and-upload
- realtime
- 1-minute
- 5-minute
upload-time:
description:
- "Time to upload logs (hh:mm)."
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Global FortiAnalyzer settings.
fortios_log_fortianalyzer3_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortianalyzer3_setting:
__change_ip: "3"
certificate: "<your_own_value> (source certificate.local.name)"
conn-timeout: "5"
enc-algorithm: "high-medium"
faz-type: "7"
hmac-algorithm: "sha256"
ips-archive: "enable"
mgmt-name: "<your_own_value>"
monitor-failure-retry-period: "11"
monitor-keepalive-period: "12"
reliable: "enable"
server: "192.168.100.40"
source-ip: "84.230.14.43"
ssl-min-proto-version: "default"
status: "enable"
upload-day: "<your_own_value>"
upload-interval: "daily"
upload-option: "store-and-upload"
upload-time: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Authenticate the module-global ``fos`` handle against the device.

    Enables API debugging and turns HTTPS off only when the caller
    explicitly passed a falsy ``https`` option.
    """
    fos.debug('on')
    # HTTPS stays on unless the option is present and falsy.
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(data['host'], data['username'], data['password'])
def filter_log_fortianalyzer3_setting_data(json):
    """Project *json* onto the recognised log.fortianalyzer3 option names.

    Returns a new dict containing only the known FortiAnalyzer setting keys
    whose value is present and not None; unknown keys are dropped.
    """
    option_list = ['__change_ip', 'certificate', 'conn-timeout',
                   'enc-algorithm', 'faz-type', 'hmac-algorithm',
                   'ips-archive', 'mgmt-name', 'monitor-failure-retry-period',
                   'monitor-keepalive-period', 'reliable', 'server',
                   'source-ip', 'ssl-min-proto-version', 'status',
                   'upload-day', 'upload-interval', 'upload-option',
                   'upload-time']
    return dict((key, json[key])
                for key in option_list if json.get(key) is not None)
def log_fortianalyzer3_setting(data, fos):
    """Push the log.fortianalyzer3 settings from *data* to the device.

    Filters the user-supplied sub-dictionary down to the known option
    names and applies it through the FortiOS API handle.
    """
    payload = filter_log_fortianalyzer3_setting_data(
        data['log_fortianalyzer3_setting'])
    return fos.set('log.fortianalyzer3',
                   'setting',
                   data=payload,
                   vdom=data['vdom'])
def fortios_log_fortianalyzer3(data, fos):
    """Dispatch the requested log.fortianalyzer3 operation.

    Logs in, runs the first configured sub-method, logs out, and returns
    the (is_error, has_changed, result) triple expected by main().
    """
    login(data)

    # Explicit dispatch table instead of eval() on a method name: eval on
    # dynamically-built strings is both unsafe and opaque.
    dispatch = {
        'log_fortianalyzer3_setting': log_fortianalyzer3_setting,
    }
    resp = None
    for method_name, method in dispatch.items():
        if data.get(method_name):
            resp = method(data, fos)
            break

    fos.logout()
    if resp is None:
        # No sub-configuration supplied; report an error triple rather than
        # crashing with an unbound `resp` (NameError) as the old code did.
        return True, False, {'status': 'error',
                             'reason': 'no configuration provided'}
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Entry point: declare the module arguments, connect, and apply them.

    Builds the Ansible argument_spec mirroring the DOCUMENTATION block,
    instantiates the FortiOS API client and reports success/failure back
    to Ansible.
    """
    # Argument spec: connection options plus the optional nested
    # log_fortianalyzer3_setting configuration dictionary.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "log_fortianalyzer3_setting": {
            "required": False, "type": "dict",
            "options": {
                "__change_ip": {"required": False, "type": "int"},
                "certificate": {"required": False, "type": "str"},
                "conn-timeout": {"required": False, "type": "int"},
                "enc-algorithm": {"required": False, "type": "str",
                                  "choices": ["high-medium", "high", "low",
                                              "disable"]},
                "faz-type": {"required": False, "type": "int"},
                "hmac-algorithm": {"required": False, "type": "str",
                                   "choices": ["sha256", "sha1"]},
                "ips-archive": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "mgmt-name": {"required": False, "type": "str"},
                "monitor-failure-retry-period": {"required": False, "type": "int"},
                "monitor-keepalive-period": {"required": False, "type": "int"},
                "reliable": {"required": False, "type": "str",
                             "choices": ["enable", "disable"]},
                "server": {"required": False, "type": "str"},
                "source-ip": {"required": False, "type": "str"},
                "ssl-min-proto-version": {"required": False, "type": "str",
                                          "choices": ["default", "SSLv3", "TLSv1",
                                                      "TLSv1-1", "TLSv1-2"]},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
                "upload-day": {"required": False, "type": "str"},
                "upload-interval": {"required": False, "type": "str",
                                    "choices": ["daily", "weekly", "monthly"]},
                "upload-option": {"required": False, "type": "str",
                                  "choices": ["store-and-upload", "realtime", "1-minute",
                                              "5-minute"]},
                "upload-time": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # Import here so a missing fortiosapi produces a clean Ansible failure
    # instead of a module-load traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    # The API handle is shared through the module-global `fos` used by
    # login() and the dispatch helpers.
    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_log_fortianalyzer3(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" looks like a copy-paste of another
        # module's message -- confirm intended wording upstream.
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
ksrajkumar/openerp-6.1 | openerp/workflow/__init__.py | 76 | 1110 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wkf_service
#.apidoc title: Workflow objects
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rooi/CouchPotatoServer | libs/bs4/__init__.py | 417 | 15401 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
# NOTE(review): this message is defined here but not referenced within the
# visible chunk; presumably surfaced during import under Python 3 -- confirm.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        markup -- markup string or file-like object to parse.
        features -- desired tree-builder features (e.g. 'html', 'xml',
            'fast') used to pick a builder when none is given.
        builder -- an explicit tree builder instance to use.
        parse_only -- restricts which parts of the document are parsed.
        from_encoding -- override for the document's encoding.
        kwargs -- accepted only so BS3-era arguments can be warned about.
        """
        # Warn about (and discard where necessary) constructor arguments
        # that Beautiful Soup 3 accepted but BS4 no longer honors.
        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")
        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")
        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")
        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")
        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. You can pass in features='html' "
                "or features='xml' to get a builder capable of handling "
                "one or the other.")
        def deprecated_argument(old_name, new_name):
            # Map a renamed BS3 keyword argument onto its BS4 name with a
            # warning; returns the old value, or None if it was not passed.
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None
        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")
        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")
        # Anything still left in kwargs is an unknown argument: hard error.
        if len(kwargs) > 0:
            # NOTE(review): dict.keys().pop() works only on Python 2, where
            # keys() returns a list; this module is the Python 2 source.
            arg = kwargs.keys().pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)
        # No explicit builder: look one up in the registry by features.
        if builder is None:
            if isinstance(features, basestring):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
        self.builder = builder
        self.is_xml = builder.is_xml
        self.builder.soup = self
        self.parse_only = parse_only
        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256:
            # Print out warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # just in case that's what the user really wants.
            if (isinstance(markup, unicode)
                and not os.path.supports_unicode_filenames):
                possible_filename = markup.encode("utf8")
            else:
                possible_filename = markup
            is_file = False
            try:
                is_file = os.path.exists(possible_filename)
            except Exception, e:
                # This is almost certainly a problem involving
                # characters not valid in filenames on this
                # system. Just let it go.
                pass
            if is_file:
                warnings.warn(
                    '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
            if markup[:5] == "http:" or markup[:6] == "https:":
                # TODO: This is ugly but I couldn't get it to work in
                # Python 3 otherwise.
                if ((isinstance(markup, bytes) and not b' ' in markup)
                    or (isinstance(markup, unicode) and not u' ' in markup)):
                    warnings.warn(
                        '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
        # prepare_markup may yield several (markup, encoding, ...) candidate
        # interpretations; try each until one parses without the builder
        # rejecting it.
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
                 self.builder.prepare_markup(markup, from_encoding)):
            self.reset()
            try:
                self._feed()
                break
            except ParserRejectedMarkup:
                pass
        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
    def reset(self):
        # Reinitialize this soup as an empty document root, so that a fresh
        # parse (or a retry after ParserRejectedMarkup) starts from scratch.
        # NOTE: Tag.__init__ must run before pushTag(self) below, since
        # pushTag relies on the tag attributes it sets up.
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        # Hide the synthetic root tag itself from serialized output.
        self.hidden = 1
        self.builder.reset()
        # Buffered character data not yet turned into a text node.
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        # Stack of currently-open whitespace-preserving tags (e.g. <pre>).
        self.preserve_whitespace_tag_stack = []
        # The soup object is the permanent bottom of the tag stack.
        self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return
        most_recently_popped = None
        stack_size = len(self.tagStack)
        # Walk the stack top-down; index 0 is the soup itself, so stop at 1.
        for i in range(stack_size - 1, 0, -1):
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                # Found the matching tag; pop it too only for inclusivePop.
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()
        return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occured
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
    def handle_endtag(self, name, nsprefix=None):
        """Close the most recently opened tag matching *name*/*nsprefix*."""
        # Flush any pending text first so it lands inside the closing tag.
        self.endData()
        self._popToTag(name, nsprefix)
    def handle_data(self, data):
        # Buffer character data; consecutive chunks are joined into a
        # single text node by the next endData() call.
        self.current_data.append(data)
    def decode(self, pretty_print=False,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a string or Unicode representation of this document.
        To get Unicode, pass None for encoding."""
        if self.is_xml:
            # Print the XML declaration; the encoding attribute is omitted
            # when the caller asked for Unicode output (encoding=None).
            encoding_part = ''
            if eventual_encoding != None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = u''
        # indent_level=None means "no pretty-printing" for Tag.decode().
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        # Delegate the actual serialization to Tag.decode(), prepending the
        # XML declaration when appropriate.
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)
# Aliases to make it easier to type the import: 'from bs4 import _soup'
# (or '_s'); both names refer to the BeautifulSoup class itself.
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""
    def __init__(self, *args, **kwargs):
        # Force the XML feature set, warn that this entry point is
        # obsolete, then delegate to the real constructor.
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
    # NOTE(review): appears to be a signal for aborting a parse early;
    # no raiser is visible in this chunk -- confirm against the builders.
    pass
class FeatureNotFound(ValueError):
    # Raised when no registered tree builder supports the requested
    # features (see the builder lookup in BeautifulSoup.__init__).
    pass
# By default, act as an HTML pretty-printer: read markup from stdin and
# write the indented tree to stdout (Python 2 print statement).
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| gpl-3.0 |
benjixx/ansible | lib/ansible/playbook/block.py | 13 | 14599 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
class Block(Base, Become, Conditional, Taggable):
_block = FieldAttribute(isa='list', default=[])
_rescue = FieldAttribute(isa='list', default=[])
_always = FieldAttribute(isa='list', default=[])
_delegate_to = FieldAttribute(isa='list')
_delegate_facts = FieldAttribute(isa='bool', default=False)
_any_errors_fatal = FieldAttribute(isa='bool')
# for future consideration? this would be functionally
# similar to the 'else' clause for exceptions
#_otherwise = FieldAttribute(isa='list')
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
self._play = play
self._role = role
self._task_include = None
self._parent_block = None
self._dep_chain = None
self._use_handlers = use_handlers
self._implicit = implicit
if task_include:
self._task_include = task_include
elif parent_block:
self._parent_block = parent_block
super(Block, self).__init__()
def get_vars(self):
'''
Blocks do not store variables directly, however they may be a member
of a role or task include which does, so return those if present.
'''
all_vars = self.vars.copy()
if self._role:
all_vars.update(self._role.get_vars(self._dep_chain))
if self._parent_block:
all_vars.update(self._parent_block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
return all_vars
@staticmethod
def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
implicit = not Block.is_block(data)
b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
@staticmethod
def is_block(ds):
is_block = False
if isinstance(ds, dict):
for attr in ('block', 'rescue', 'always'):
if attr in ds:
is_block = True
break
return is_block
def preprocess_data(self, ds):
'''
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
'''
if not Block.is_block(ds):
if isinstance(ds, list):
return super(Block, self).preprocess_data(dict(block=ds))
else:
return super(Block, self).preprocess_data(dict(block=[ds]))
return super(Block, self).preprocess_data(ds)
def _load_block(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=self._task_include,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_rescue(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=self._task_include,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_always(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=self._task_include,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def get_dep_chain(self):
if self._dep_chain is None:
if self._parent_block:
return self._parent_block.get_dep_chain()
elif self._task_include:
return self._task_include._block.get_dep_chain()
else:
return None
else:
return self._dep_chain[:]
def copy(self, exclude_parent=False, exclude_tasks=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
if isinstance(task, Block):
new_task = task.copy(exclude_parent=True)
new_task._parent_block = new_block
else:
new_task = task.copy(exclude_block=True)
new_task._block = new_block
new_task_list.append(new_task)
return new_task_list
new_me = super(Block, self).copy()
new_me._play = self._play
new_me._use_handlers = self._use_handlers
if self._dep_chain:
new_me._dep_chain = self._dep_chain[:]
if not exclude_tasks:
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._parent_block = None
if self._parent_block and not exclude_parent:
new_me._parent_block = self._parent_block.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
new_me._role = self._role
new_me._task_include = None
if self._task_include:
new_me._task_include = self._task_include.copy(exclude_block=True)
new_me._task_include._block = self._task_include._block.copy(exclude_tasks=True)
return new_me
def serialize(self):
'''
Override of the default serialize method, since when we're serializing
a task we don't want to include the attribute list of tasks.
'''
data = dict()
for attr in self._get_base_attributes():
if attr not in ('block', 'rescue', 'always'):
data[attr] = getattr(self, attr)
data['dep_chain'] = self.get_dep_chain()
if self._role is not None:
data['role'] = self._role.serialize()
if self._task_include is not None:
data['task_include'] = self._task_include.serialize()
if self._parent_block is not None:
data['parent_block'] = self._parent_block.copy(exclude_tasks=True).serialize()
return data
def deserialize(self, data):
'''
Override of the default deserialize method, to match the above overridden
serialize method
'''
from ansible.playbook.task import Task
# we don't want the full set of attributes (the task lists), as that
# would lead to a serialize/deserialize loop
for attr in self._get_base_attributes():
if attr in data and attr not in ('block', 'rescue', 'always'):
setattr(self, attr, data.get(attr))
self._dep_chain = data.get('dep_chain', None)
# if there was a serialized role, unpack it too
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
# if there was a serialized task include, unpack it too
ti_data = data.get('task_include')
if ti_data:
ti = Task()
ti.deserialize(ti_data)
self._task_include = ti
pb_data = data.get('parent_block')
if pb_data:
pb = Block()
pb.deserialize(pb_data)
self._parent_block = pb
self._dep_chain = self._parent_block.get_dep_chain()
def evaluate_conditional(self, templar, all_vars):
dep_chain = self.get_dep_chain()
if dep_chain:
for dep in dep_chain:
if not dep.evaluate_conditional(templar, all_vars):
return False
if self._task_include is not None:
if not self._task_include.evaluate_conditional(templar, all_vars):
return False
if self._parent_block is not None:
if not self._parent_block.evaluate_conditional(templar, all_vars):
return False
elif self._role is not None:
if not self._role.evaluate_conditional(templar, all_vars):
return False
return super(Block, self).evaluate_conditional(templar, all_vars)
def set_loader(self, loader):
self._loader = loader
if self._parent_block:
self._parent_block.set_loader(loader)
elif self._role:
self._role.set_loader(loader)
if self._task_include:
self._task_include.set_loader(loader)
dep_chain = self.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False):
'''
Generic logic to get the attribute or parent attribute for a block value.
'''
value = None
try:
value = self._attributes[attr]
if self._parent_block and (value is None or extend):
parent_value = getattr(self._parent_block, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
if self._task_include and (value is None or extend):
parent_value = getattr(self._task_include, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
if self._role and (value is None or extend) and hasattr(self._role, attr):
parent_value = getattr(self._role, attr, None)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
dep_chain = self.get_dep_chain()
if dep_chain and (value is None or extend):
dep_chain.reverse()
for dep in dep_chain:
dep_value = getattr(dep, attr, None)
if extend:
value = self._extend_value(value, dep_value)
else:
value = dep_value
if value is not None and not extend:
break
if self._play and (value is None or extend) and hasattr(self._play, attr):
parent_value = getattr(self._play, attr, None)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
except KeyError as e:
pass
return value
def _get_attr_environment(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
environment = self._attributes['environment']
parent_environment = self._get_parent_attribute('environment', extend=True)
if parent_environment is not None:
environment = self._extend_value(environment, parent_environment)
return environment
def _get_attr_any_errors_fatal(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
return self._get_parent_attribute('any_errors_fatal')
def filter_tagged_tasks(self, play_context, all_vars):
'''
Creates a new block, with task lists filtered based on the tags contained
within the play_context object.
'''
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
tmp_list.append(evaluate_block(task))
elif task.action == 'meta' \
or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) \
or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
new_block = self.copy(exclude_tasks=True)
new_block.block = evaluate_and_append_task(block.block)
new_block.rescue = evaluate_and_append_task(block.rescue)
new_block.always = evaluate_and_append_task(block.always)
return new_block
return evaluate_block(self)
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
| gpl-3.0 |
snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/jedi/jedi/inference/__init__.py | 2 | 8499 | """
Type inference of Python code in |jedi| is based on three assumptions:
* The code uses as least side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle I call lazy type inference. That
said, the typical entry point for static analysis is calling
``infer_expr_stmt``. There's separate logic for autocompletion in the API, the
inference_state is all about inferring an expression.
TODO this paragraph is not what jedi does anymore, it's similar, but not the
same.
Now you need to understand what follows after ``infer_expr_stmt``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``infer_expr_stmt`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``InferenceState.infer_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.infer_node`` cares for resolving the dotted path
- ``InferenceState.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``infer_node`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``infer_expr_stmt`` recursion. It's really
easy. Python can obviously get way more complicated then this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy type inference is really good because it
only *inferes* what needs to be *inferred*. All the statements and modules
that are not used are just being ignored.
"""
import parso
from jedi.file_io import FileIO
from jedi import debug
from jedi import settings
from jedi.inference import imports
from jedi.inference import recursion
from jedi.inference.cache import inference_state_function_cache
from jedi.inference import helpers
from jedi.inference.names import TreeNameDefinition
from jedi.inference.base_value import ContextualizedNode, \
ValueSet, iterate_values
from jedi.inference.value import ClassValue, FunctionValue
from jedi.inference.syntax_tree import infer_expr_stmt, \
check_tuple_assignments, tree_name_to_values
from jedi.inference.imports import follow_error_node_imports_if_possible
from jedi.plugins import plugin_manager
class InferenceState:
def __init__(self, project, environment=None, script_path=None):
if environment is None:
environment = project.get_environment()
self.environment = environment
self.script_path = script_path
self.compiled_subprocess = environment.get_inference_state_subprocess(self)
self.grammar = environment.get_grammar()
self.latest_grammar = parso.load_grammar(version='3.7')
self.memoize_cache = {} # for memoize decorators
self.module_cache = imports.ModuleCache() # does the job of `sys.modules`.
self.stub_module_cache = {} # Dict[Tuple[str, ...], Optional[ModuleValue]]
self.compiled_cache = {} # see `inference.compiled.create()`
self.inferred_element_counts = {}
self.mixed_cache = {} # see `inference.compiled.mixed._create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.project = project
self.access_cache = {}
self.allow_descriptor_getattr = False
self.flow_analysis_enabled = True
self.reset_recursion_limitations()
def import_module(self, import_names, sys_path=None, prefer_stubs=True):
return imports.import_module_by_names(
self, import_names, sys_path, prefer_stubs=prefer_stubs)
@staticmethod
@plugin_manager.decorate()
def execute(value, arguments):
debug.dbg('execute: %s %s', value, arguments)
with debug.increase_indent_cm():
value_set = value.py__call__(arguments=arguments)
debug.dbg('execute result: %s in %s', value_set, value)
return value_set
# mypy doesn't suppport decorated propeties (https://github.com/python/mypy/issues/1362)
@property # type: ignore[misc]
@inference_state_function_cache()
def builtins_module(self):
module_name = 'builtins'
builtins_module, = self.import_module((module_name,), sys_path=())
return builtins_module
@property # type: ignore[misc]
@inference_state_function_cache()
def typing_module(self):
typing_module, = self.import_module(('typing',))
return typing_module
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def get_sys_path(self, **kwargs):
"""Convenience function"""
return self.project._get_sys_path(self, **kwargs)
def infer(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
type_ = def_.type
is_classdef = type_ == 'classdef'
if is_classdef or type_ == 'funcdef':
if is_classdef:
c = ClassValue(self, context, name.parent)
else:
c = FunctionValue.from_context(context, name.parent)
return ValueSet([c])
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return infer_expr_stmt(context, def_, name)
if type_ == 'for_stmt':
container_types = context.infer_node(def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterate_values(container_types, cn)
n = TreeNameDefinition(context, name)
return check_tuple_assignments(n, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
if type_ == 'with_stmt':
return tree_name_to_values(self, context, name)
elif type_ == 'param':
return context.py__getattribute__(name.value, position=name.end_pos)
elif type_ == 'namedexpr_test':
return context.infer_node(def_)
else:
result = follow_error_node_imports_if_possible(context, name)
if result is not None:
return result
return helpers.infer_call_of_leaf(context, name)
def parse_and_get_code(self, code=None, path=None,
use_latest_grammar=False, file_io=None, **kwargs):
if path is not None:
path = str(path)
if code is None:
if file_io is None:
file_io = FileIO(path)
code = file_io.read()
# We cannot just use parso, because it doesn't use errors='replace'.
code = parso.python_bytes_to_unicode(code, encoding='utf-8', errors='replace')
if len(code) > settings._cropped_file_size:
code = code[:settings._cropped_file_size]
grammar = self.latest_grammar if use_latest_grammar else self.grammar
return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code
def parse(self, *args, **kwargs):
return self.parse_and_get_code(*args, **kwargs)[0]
| gpl-3.0 |
nicholasserra/sentry | src/sentry/api/endpoints/release_files.py | 1 | 6287 | from __future__ import absolute_import
from StringIO import StringIO
from django.db import IntegrityError, transaction
from rest_framework.negotiation import DefaultContentNegotiation
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.paginator import OffsetPaginator
from sentry.api.serializers import serialize
from sentry.models import File, Release, ReleaseFile
from sentry.utils.apidocs import scenario, attach_scenarios
ERR_FILE_EXISTS = 'A file matching this name already exists for the given release'
@scenario('UploadReleaseFile')
def upload_file_scenario(runner):
runner.request(
method='POST',
path='/projects/%s/%s/releases/%s/files/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version),
data={
'header': 'Content-Type:text/plain; encoding=utf-8',
'name': '/demo/hello.py',
'file': ('hello.py', StringIO('print "Hello World!"')),
},
format='multipart'
)
@scenario('ListReleaseFiles')
def list_files_scenario(runner):
runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/message-for-you.txt',
contents='Hello World!'
)
runner.request(
method='GET',
path='/projects/%s/%s/releases/%s/files/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version)
)
class ConditionalContentNegotiation(DefaultContentNegotiation):
    """
    Overrides the parsers on POST to support file uploads.
    """
    def select_parser(self, request, parsers):
        # POST bodies on this endpoint are multipart file uploads, so swap
        # in form/multipart parsers before deferring to the default
        # negotiation logic. Other methods keep the standard parser list.
        if request.method == 'POST':
            parsers = [FormParser(), MultiPartParser()]
        return super(ConditionalContentNegotiation, self).select_parser(
            request, parsers
        )
class ReleaseFilesEndpoint(ProjectEndpoint):
doc_section = DocSection.RELEASES
content_negotiation_class = ConditionalContentNegotiation
@attach_scenarios([list_files_scenario])
def get(self, request, project, version):
"""
List a Release's Files
``````````````````````
Retrieve a list of files for a given release.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to list the
release files of.
:pparam string version: the version identifier of the release.
:auth: required
"""
try:
release = Release.objects.get(
project=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
file_list = ReleaseFile.objects.filter(
release=release,
).select_related('file', 'file__blob').order_by('name')
return self.paginate(
request=request,
queryset=file_list,
order_by='name',
paginator_cls=OffsetPaginator,
on_results=lambda x: serialize(x, request.user),
)
@attach_scenarios([upload_file_scenario])
def post(self, request, project, version):
"""
Upload a New File
`````````````````
Upload a new file for the given release.
Unlike other API requests, files must be uploaded using the
traditional multipart/form-data content-type.
The optional 'name' attribute should reflect the absolute path
that this file will be referenced as. For example, in the case of
JavaScript you might specify the full web URI.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to change the
release of.
:pparam string version: the version identifier of the release.
:param string name: the name (full path) of the file.
:param file file: the multipart encoded file.
:param string header: this parameter can be supplied multiple times
to attach headers to the file. Each header
is a string in the format ``key:value``. For
instance it can be used to define a content
type.
:auth: required
"""
try:
release = Release.objects.get(
project=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if 'file' not in request.FILES:
return Response({'detail': 'Missing uploaded file'}, status=400)
fileobj = request.FILES['file']
full_name = request.DATA.get('name', fileobj.name)
name = full_name.rsplit('/', 1)[-1]
headers = {
'Content-Type': fileobj.content_type,
}
for headerval in request.DATA.getlist('header') or ():
try:
k, v = headerval.split(':', 1)
except ValueError:
return Response({'detail': 'header value was not formatted correctly'}, status=400)
else:
headers[k] = v.strip()
file = File.objects.create(
name=name,
type='release.file',
headers=headers,
)
file.putfile(fileobj)
try:
with transaction.atomic():
releasefile = ReleaseFile.objects.create(
project=release.project,
release=release,
file=file,
name=full_name,
)
except IntegrityError:
file.delete()
return Response({'detail': ERR_FILE_EXISTS}, status=409)
return Response(serialize(releasefile, request.user), status=201)
| bsd-3-clause |
futurice/vdsm | tests/iscsiTests.py | 2 | 2056 | import threading
import time
from contextlib import contextmanager
from testrunner import VdsmTestCase as TestCaseBase
from storage import iscsi
class AsyncStubOperation(object):
    """Fake asynchronous operation that completes after *timeout* seconds.

    Used to stub out iscsiadm.session_rescan_async() in the tests below.
    A timeout of 0 means the operation is complete immediately.
    """
    def __init__(self, timeout):
        self._evt = threading.Event()
        self._timer = None
        if timeout == 0:
            self._evt.set()
        else:
            # Bug fix: the Timer was previously created but never
            # started, so a nonzero-timeout operation could only ever
            # complete via stop().
            self._timer = threading.Timer(timeout, self._evt.set)
            # Daemonize so a long pending timer cannot delay process exit.
            self._timer.daemon = True
            self._timer.start()

    def wait(self, timeout=None, cond=None):
        """Block until completion or until *timeout* seconds elapse."""
        if cond is not None:
            raise Exception("TODO!!!")
        self._evt.wait(timeout)

    def stop(self):
        """Complete the operation immediately, cancelling any pending timer."""
        if self._timer is not None:
            self._timer.cancel()
        self._evt.set()

    def result(self):
        """Return (None, None) once finished, or None while still running."""
        if self._evt.is_set():
            return (None, None)
        else:
            return None
class RescanTimeoutTests(TestCaseBase):
    """Verify that iscsi.rescan() honors its minimum and maximum timeouts."""
    def setUp(self):
        # Monkeypatch the async rescan with a controllable stub.
        self._iscsiadm_rescan_async = \
            iscsi.iscsiadm.session_rescan_async
        iscsi.iscsiadm.session_rescan_async = self._iscsi_stub
        self._timeout = 0

    def tearDown(self):
        # Restore the real implementation.
        iscsi.iscsiadm.session_rescan_async = \
            self._iscsiadm_rescan_async

    def _iscsi_stub(self):
        return AsyncStubOperation(self._timeout)

    @contextmanager
    def assertMaxDuration(self, maxtime):
        """Fail if the with-block takes longer than *maxtime* seconds."""
        start = time.time()
        try:
            yield
        finally:
            elapsed = time.time() - start
            if maxtime < elapsed:
                self.fail("Operation was too slow %fs > %fs" %
                          (elapsed, maxtime))

    @contextmanager
    def assertMinDuration(self, mintime):
        """Fail if the with-block finishes sooner than *mintime* seconds."""
        start = time.time()
        try:
            yield
        finally:
            elapsed = time.time() - start
            if mintime > elapsed:
                # Bug fix: the comparison sign in the message was inverted
                # (a too-fast operation has elapsed < mintime).
                self.fail("Operation was too fast %fs < %fs" %
                          (elapsed, mintime))

    def testFast(self):
        # Stub completes instantly; rescan must still wait the 2s minimum.
        self._timeout = 0
        with self.assertMinDuration(2):
            iscsi.rescan(2, 4)

    def testSlow(self):
        # Stub takes 60s; rescan must give up by its maximum timeout.
        self._timeout = 60
        with self.assertMaxDuration(3):
            iscsi.rescan(1, 2)
| gpl-2.0 |
HonzaKral/django | django/contrib/sessions/middleware.py | 104 | 2642 | import time
from importlib import import_module
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class SessionMiddleware(object):
    """Attach a lazy session to each request and persist it on the response."""
    def __init__(self):
        # Resolve the configured session backend once at startup.
        engine = import_module(settings.SESSION_ENGINE)
        self.SessionStore = engine.SessionStore

    def process_request(self, request):
        # Wrap the session cookie (may be None) in a session store; no
        # backend access happens until the session is first used.
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
        request.session = self.SessionStore(session_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie or delete
        the session cookie if the session has been emptied.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
            empty = request.session.is_empty()
        except AttributeError:
            # request.session was removed or replaced by other middleware;
            # nothing to persist.
            pass
        else:
            # First check if we need to delete this cookie.
            # The session should be deleted only if the session is entirely empty
            if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
                response.delete_cookie(settings.SESSION_COOKIE_NAME,
                    domain=settings.SESSION_COOKIE_DOMAIN)
            else:
                if accessed:
                    # A session read makes the response depend on the
                    # cookie, so tell caches to vary on it.
                    patch_vary_headers(response, ('Cookie',))
                if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                    if request.session.get_expire_at_browser_close():
                        # Session cookie: no explicit expiry.
                        max_age = None
                        expires = None
                    else:
                        max_age = request.session.get_expiry_age()
                        expires_time = time.time() + max_age
                        expires = cookie_date(expires_time)
                    # Save the session data and refresh the client cookie.
                    # Skip session save for 500 responses, refs #3881.
                    if response.status_code != 500:
                        request.session.save()
                        response.set_cookie(settings.SESSION_COOKIE_NAME,
                                request.session.session_key, max_age=max_age,
                                expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                                path=settings.SESSION_COOKIE_PATH,
                                secure=settings.SESSION_COOKIE_SECURE or None,
                                httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/__init__.py | 5 | 3178 | """
NetworkX
========
NetworkX is a Python package for the creation, manipulation,
and study of the structure, dynamics, and functions
of complex networks.
Website (including documentation)::
http://networkx.github.io
Mailing list::
https://groups.google.com/forum/#!forum/networkx-discuss
Source::
https://github.com/networkx/networkx
Bug reports::
https://github.com/networkx/networkx/issues
Simple example
--------------
Find the shortest path between two nodes in an undirected graph::
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edge('A', 'B', weight=4)
>>> G.add_edge('B', 'D', weight=2)
>>> G.add_edge('A', 'C', weight=3)
>>> G.add_edge('C', 'D', weight=4)
>>> nx.shortest_path(G, 'A', 'D', weight='weight')
['A', 'B', 'D']
Bugs
----
Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
Or, even better, fork the repository on GitHub and create a pull request (PR).
License
-------
Released under the 3-Clause BSD license::
Copyright (C) 2004-2018 NetworkX Developers
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Add platform dependent shared library path to sys.path
#
from __future__ import absolute_import
import sys
if sys.version_info[:2] < (2, 7):
m = "Python 2.7 or later is required for NetworkX (%d.%d detected)."
raise ImportError(m % sys.version_info[:2])
del sys
# Release data
from networkx import release
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
(release.authors['Hagberg'] + release.authors['Schult'] +
release.authors['Swart'])
__license__ = release.license
__date__ = release.date
__version__ = release.version
__bibtex__ = """@inproceedings{hagberg-2008-exploring,
author = {Aric A. Hagberg and Daniel A. Schult and Pieter J. Swart},
title = {Exploring network structure, dynamics, and function using {NetworkX}},
year = {2008},
month = Aug,
urlpdf = {http://math.lanl.gov/~hagberg/Papers/hagberg-2008-exploring.pdf},
booktitle = {Proceedings of the 7th Python in Science Conference (SciPy2008)},
editors = {G\"{a}el Varoquaux, Travis Vaught, and Jarrod Millman},
address = {Pasadena, CA USA},
pages = {11--15}
}"""
# These are import orderwise
from networkx.exception import *
import networkx.utils
import networkx.classes.filters
import networkx.classes
from networkx.classes import *
import networkx.convert
from networkx.convert import *
import networkx.convert_matrix
from networkx.convert_matrix import *
import networkx.relabel
from networkx.relabel import *
import networkx.generators
from networkx.generators import *
import networkx.readwrite
from networkx.readwrite import *
# Need to test with SciPy, when available
import networkx.algorithms
from networkx.algorithms import *
import networkx.linalg
from networkx.linalg import *
from networkx.tests.test import run as test
import networkx.drawing
from networkx.drawing import *
| gpl-3.0 |
resmio/heroku-buildpack-geodjango | vendor/pip-pop/docopt.py | 283 | 19784 | """Pythonic command-line interface parser that will make you smile.
* http://docopt.org
* Repository and issue-tracker: https://github.com/docopt/docopt
* Licensed under terms of MIT license (see LICENSE-MIT)
* Copyright (c) 2013 Vladimir Keleshev, vladimir@keleshev.com
"""
import sys
import re
__all__ = ['docopt']
__version__ = '0.6.1'
class DocoptLanguageError(Exception):

    """Raised when the developer's usage-message itself is malformed."""


class DocoptExit(SystemExit):

    """Raised to abort the program when argv does not match the usage."""

    # Filled in by docopt() with the parsed "usage:" section so that the
    # exit message can show the user how to invoke the program.
    usage = ''

    def __init__(self, message=''):
        full_message = (message + '\n' + self.usage).strip()
        SystemExit.__init__(self, full_message)
class Pattern(object):
    """Base node of the usage-pattern tree; equality is by repr."""
    def __eq__(self, other):
        # Structural equality: two patterns are equal iff they print alike.
        return repr(self) == repr(other)
    def __hash__(self):
        return hash(repr(self))
    def fix(self):
        # Normalize the tree in place before matching argv against it.
        self.fix_identities()
        self.fix_repeating_arguments()
        return self
    def fix_identities(self, uniq=None):
        """Make pattern-tree tips point to same object if they are equal."""
        if not hasattr(self, 'children'):
            return self
        uniq = list(set(self.flat())) if uniq is None else uniq
        for i, child in enumerate(self.children):
            if not hasattr(child, 'children'):
                # Leaf: replace it with the canonical equal instance so
                # repeated elements share (and accumulate into) one object.
                assert child in uniq
                self.children[i] = uniq[uniq.index(child)]
            else:
                child.fix_identities(uniq)
    def fix_repeating_arguments(self):
        """Fix elements that should accumulate/increment values."""
        either = [list(child.children) for child in transform(self).children]
        for case in either:
            # Any element appearing more than once in one alternative repeats.
            for e in [child for child in case if case.count(child) > 1]:
                if type(e) is Argument or type(e) is Option and e.argcount:
                    # Repeating value-bearing element collects into a list.
                    if e.value is None:
                        e.value = []
                    elif type(e.value) is not list:
                        e.value = e.value.split()
                if type(e) is Command or type(e) is Option and e.argcount == 0:
                    # Repeating flag/command becomes a counter.
                    e.value = 0
        return self
def transform(pattern):
    """Expand pattern into an (almost) equivalent one, but with single Either.
    Example: ((-a | -b) (-c | -d)) => (-a -c | -a -d | -b -c | -b -d)
    Quirks: [-a] => (-a), (-a...) => (-a -a)
    """
    result = []
    groups = [[pattern]]
    while groups:
        children = groups.pop(0)
        parents = [Required, Optional, OptionsShortcut, Either, OneOrMore]
        if any(t in map(type, children) for t in parents):
            # Pull out the first branch node and expand it in place.
            child = [c for c in children if type(c) in parents][0]
            children.remove(child)
            if type(child) is Either:
                # Fork one group per alternative.
                for c in child.children:
                    groups.append([c] + children)
            elif type(child) is OneOrMore:
                # "x..." is approximated by "x x".
                groups.append(child.children * 2 + children)
            else:
                groups.append(child.children + children)
        else:
            # Group is fully flattened to leaves: it is one alternative.
            result.append(children)
    return Either(*[Required(*e) for e in result])
class LeafPattern(Pattern):
    """Leaf/terminal node of a pattern tree."""
    def __init__(self, name, value=None):
        self.name, self.value = name, value
    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
    def flat(self, *types):
        # A leaf flattens to itself when no filter or a matching one given.
        return [self] if not types or type(self) in types else []
    def match(self, left, collected=None):
        """Try to match this leaf against the token list `left`.

        Returns (matched, remaining_tokens, collected_so_far).
        """
        collected = [] if collected is None else collected
        pos, match = self.single_match(left)
        if match is None:
            return False, left, collected
        # Consume the matched token from the remaining input.
        left_ = left[:pos] + left[pos + 1:]
        same_name = [a for a in collected if a.name == self.name]
        if type(self.value) in (int, list):
            # Repeating element (set up by fix_repeating_arguments):
            # accumulate a count or append to a list of values.
            if type(self.value) is int:
                increment = 1
            else:
                increment = ([match.value] if type(match.value) is str
                             else match.value)
            if not same_name:
                match.value = increment
                return True, left_, collected + [match]
            same_name[0].value += increment
            return True, left_, collected
        return True, left_, collected + [match]
class BranchPattern(Pattern):
    """Branch/inner node of a pattern tree."""
    def __init__(self, *children):
        self.children = list(children)
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join(repr(a) for a in self.children))
    def flat(self, *types):
        if type(self) in types:
            return [self]
        # Otherwise gather the requested node types from all children.
        return sum([child.flat(*types) for child in self.children], [])
class Argument(LeafPattern):
    """Positional argument, written as '<name>' (or UPPERCASE) in usage."""

    def single_match(self, left):
        # Any positional token in argv can satisfy an argument slot.
        for n, pattern in enumerate(left):
            if type(pattern) is Argument:
                return n, Argument(self.name, pattern.value)
        return None, None

    @classmethod
    def parse(class_, source):
        """Build an Argument from usage text such as '<name> [default: x]'.

        FIX: raw strings avoid the invalid escape sequences ('\\S', '\\[')
        that raise DeprecationWarning on Python 3.6+; the patterns are
        otherwise unchanged.
        """
        name = re.findall(r'(<\S*?>)', source)[0]
        value = re.findall(r'\[default: (.*)\]', source, flags=re.I)
        return class_(name, value[0] if value else None)
class Command(Argument):
    """A positional token that must literally equal the command name."""
    def __init__(self, name, value=False):
        self.name, self.value = name, value
    def single_match(self, left):
        # Only the *first* positional token may satisfy a command; if it
        # is some other word, stop looking (commands are positional).
        for n, pattern in enumerate(left):
            if type(pattern) is Argument:
                if pattern.value == self.name:
                    return n, Command(self.name, True)
                else:
                    break
        return None, None
class Option(LeafPattern):
    """A command-line option, e.g. '-h' / '--help' / '--speed=<kn>'."""

    def __init__(self, short=None, long=None, argcount=0, value=False):
        assert argcount in (0, 1)
        self.short, self.long, self.argcount = short, long, argcount
        # value=False with argcount means "takes an argument, none given
        # yet" -> store None so [default: ...] handling can fill it in.
        self.value = None if value is False and argcount else value

    @classmethod
    def parse(class_, option_description):
        """Build an Option from one line of an "options:" section."""
        short, long, argcount, value = None, None, 0, False
        # NOTE(review): upstream docopt splits names from the description
        # on a double space; confirm this separator survived copying.
        options, _, description = option_description.strip().partition(' ')
        options = options.replace(',', ' ').replace('=', ' ')
        for s in options.split():
            if s.startswith('--'):
                long = s
            elif s.startswith('-'):
                short = s
            else:
                # A bare word among the names is the option's argument.
                argcount = 1
        if argcount:
            # FIX: raw string avoids the invalid '\[' escape sequence
            # (DeprecationWarning on Python 3.6+); pattern unchanged.
            matched = re.findall(r'\[default: (.*)\]', description, flags=re.I)
            value = matched[0] if matched else None
        return class_(short, long, argcount, value)

    def single_match(self, left):
        for n, pattern in enumerate(left):
            if self.name == pattern.name:
                return n, pattern
        return None, None

    @property
    def name(self):
        # Prefer the long form as the canonical name.
        return self.long or self.short

    def __repr__(self):
        return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
                                           self.argcount, self.value)
class Required(BranchPattern):
    """Group in which every child must match, in order: ( a b )."""
    def match(self, left, collected=None):
        collected = [] if collected is None else collected
        l = left
        c = collected
        for pattern in self.children:
            matched, l, c = pattern.match(l, c)
            if not matched:
                # One child failed: the whole group fails, consuming nothing.
                return False, left, collected
        return True, l, c
class Optional(BranchPattern):
    """Group whose children may match: [ a b ]; the group always succeeds."""
    def match(self, left, collected=None):
        collected = [] if collected is None else collected
        for pattern in self.children:
            m, left, collected = pattern.match(left, collected)
        return True, left, collected
class OptionsShortcut(Optional):
    """Marker/placeholder for [options] shortcut."""
class OneOrMore(BranchPattern):
    """Repetition: child... ; must match at least once."""
    def match(self, left, collected=None):
        assert len(self.children) == 1
        collected = [] if collected is None else collected
        l = left
        c = collected
        l_ = None  # remaining tokens after the previous iteration
        matched = True
        times = 0
        while matched:
            # could it be that something didn't match but changed l or c?
            matched, l, c = self.children[0].match(l, c)
            times += 1 if matched else 0
            if l_ == l:
                # No progress since last round: stop to avoid looping forever.
                break
            l_ = l
        if times >= 1:
            return True, l, c
        return False, left, collected
class Either(BranchPattern):
    """Alternation: ( a | b ); pick the best-matching alternative."""
    def match(self, left, collected=None):
        collected = [] if collected is None else collected
        outcomes = []
        for pattern in self.children:
            matched, _, _ = outcome = pattern.match(left, collected)
            if matched:
                outcomes.append(outcome)
        if outcomes:
            # Prefer the alternative leaving the fewest unconsumed tokens.
            return min(outcomes, key=lambda outcome: len(outcome[1]))
        return False, left, collected
class Tokens(list):
    """A token stream (a list) plus the error class to raise on failure."""

    def __init__(self, source, error=DocoptExit):
        self += source.split() if hasattr(source, 'split') else source
        # DocoptExit for user argv errors, DocoptLanguageError for
        # errors in the developer-written usage pattern.
        self.error = error

    @staticmethod
    def from_pattern(source):
        # Make brackets, pipes and ellipsis stand-alone tokens.
        source = re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source)
        # FIX: raw string avoids the invalid '\s'/'\S' escape sequences
        # (DeprecationWarning on Python 3.6+); the regex is unchanged.
        source = [s for s in re.split(r'\s+|(\S*<.*?>)', source) if s]
        return Tokens(source, error=DocoptLanguageError)

    def move(self):
        """Pop and return the next token, or None when exhausted."""
        return self.pop(0) if len(self) else None

    def current(self):
        """Peek at the next token without consuming it."""
        return self[0] if len(self) else None
def parse_long(tokens, options):
    """long ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
    # Returns a one-element list with the parsed Option; `options` is
    # extended in place when a previously unknown option is encountered.
    long, eq, value = tokens.move().partition('=')
    assert long.startswith('--')
    value = None if eq == value == '' else value
    similar = [o for o in options if o.long == long]
    if tokens.error is DocoptExit and similar == []:  # if no exact match
        # When parsing argv (not the usage text), allow unambiguous
        # prefixes of known long options.
        similar = [o for o in options if o.long and o.long.startswith(long)]
    if len(similar) > 1:  # might be simply specified ambiguously 2+ times?
        raise tokens.error('%s is not a unique prefix: %s?' %
                           (long, ', '.join(o.long for o in similar)))
    elif len(similar) < 1:
        argcount = 1 if eq == '=' else 0
        o = Option(None, long, argcount)
        options.append(o)
        if tokens.error is DocoptExit:
            # Parsing argv: record the actual value (True for flags).
            o = Option(None, long, argcount, value if argcount else True)
    else:
        # Copy the known option so matching does not mutate the registry.
        o = Option(similar[0].short, similar[0].long,
                   similar[0].argcount, similar[0].value)
        if o.argcount == 0:
            if value is not None:
                raise tokens.error('%s must not have an argument' % o.long)
        else:
            if value is None:
                if tokens.current() in [None, '--']:
                    raise tokens.error('%s requires argument' % o.long)
                # Argument supplied as the following token.
                value = tokens.move()
        if tokens.error is DocoptExit:
            o.value = value if value is not None else True
    return [o]
def parse_shorts(tokens, options):
    """shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
    # A single token like '-abc' may stack several short options.
    token = tokens.move()
    assert token.startswith('-') and not token.startswith('--')
    left = token.lstrip('-')
    parsed = []
    while left != '':
        short, left = '-' + left[0], left[1:]
        similar = [o for o in options if o.short == short]
        if len(similar) > 1:
            raise tokens.error('%s is specified ambiguously %d times' %
                               (short, len(similar)))
        elif len(similar) < 1:
            o = Option(short, None, 0)
            options.append(o)
            if tokens.error is DocoptExit:
                o = Option(short, None, 0, True)
        else:  # why copying is necessary here?
            o = Option(short, similar[0].long,
                       similar[0].argcount, similar[0].value)
            value = None
            if o.argcount != 0:
                if left == '':
                    # Argument is the next token (e.g. '-f FILE').
                    if tokens.current() in [None, '--']:
                        raise tokens.error('%s requires argument' % short)
                    value = tokens.move()
                else:
                    # Argument is attached (e.g. '-fFILE'); consume the rest.
                    value = left
                    left = ''
            if tokens.error is DocoptExit:
                o.value = value if value is not None else True
        parsed.append(o)
    return parsed
def parse_pattern(source, options):
    """Parse the usage text into a pattern tree (Required at the root)."""
    tokens = Tokens.from_pattern(source)
    result = parse_expr(tokens, options)
    if tokens.current() is not None:
        raise tokens.error('unexpected ending: %r' % ' '.join(tokens))
    return Required(*result)
def parse_expr(tokens, options):
    """expr ::= seq ( '|' seq )* ;"""
    seq = parse_seq(tokens, options)
    if tokens.current() != '|':
        return seq
    # Multi-token sequences become Required groups inside the Either.
    result = [Required(*seq)] if len(seq) > 1 else seq
    while tokens.current() == '|':
        tokens.move()
        seq = parse_seq(tokens, options)
        result += [Required(*seq)] if len(seq) > 1 else seq
    return [Either(*result)] if len(result) > 1 else result
def parse_seq(tokens, options):
    """seq ::= ( atom [ '...' ] )* ;"""
    result = []
    while tokens.current() not in [None, ']', ')', '|']:
        atom = parse_atom(tokens, options)
        if tokens.current() == '...':
            # Postfix ellipsis wraps the preceding atom in a repetition.
            atom = [OneOrMore(*atom)]
            tokens.move()
        result += atom
    return result
def parse_atom(tokens, options):
    """atom ::= '(' expr ')' | '[' expr ']' | 'options'
             | long | shorts | argument | command ;
    """
    token = tokens.current()
    result = []
    if token in '([':
        tokens.move()
        # Map the opening bracket to its closer and group type.
        matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token]
        result = pattern(*parse_expr(tokens, options))
        if tokens.move() != matching:
            raise tokens.error("unmatched '%s'" % token)
        return [result]
    elif token == 'options':
        tokens.move()
        return [OptionsShortcut()]
    elif token.startswith('--') and token != '--':
        return parse_long(tokens, options)
    elif token.startswith('-') and token not in ('-', '--'):
        return parse_shorts(tokens, options)
    elif token.startswith('<') and token.endswith('>') or token.isupper():
        return [Argument(tokens.move())]
    else:
        return [Command(tokens.move())]
def parse_argv(tokens, options, options_first=False):
    """Parse command-line argument vector.
    If options_first:
        argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
    else:
        argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
    """
    parsed = []
    while tokens.current() is not None:
        if tokens.current() == '--':
            # Everything after a bare '--' is positional, verbatim.
            return parsed + [Argument(None, v) for v in tokens]
        elif tokens.current().startswith('--'):
            parsed += parse_long(tokens, options)
        elif tokens.current().startswith('-') and tokens.current() != '-':
            parsed += parse_shorts(tokens, options)
        elif options_first:
            # First positional ends option parsing in options_first mode.
            return parsed + [Argument(None, v) for v in tokens]
        else:
            parsed.append(Argument(None, tokens.move()))
    return parsed
def parse_defaults(doc):
    """Collect Option objects from every "options:" section of `doc`."""
    defaults = []
    for s in parse_section('options:', doc):
        # FIXME corner case "bla: options: --foo"
        _, _, s = s.partition(':')  # get rid of "options:"
        # FIX: raw string avoids the invalid '\S' escape sequence
        # (DeprecationWarning on Python 3.6+); the regex is unchanged.
        split = re.split(r'\n[ \t]*(-\S+?)', '\n' + s)[1:]
        # Stitch each option name back onto its description.
        split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])]
        options = [Option.parse(s) for s in split if s.startswith('-')]
        defaults += options
    return defaults
def parse_section(name, source):
    """Return the stripped text of every `name` section found in `source`.

    A section is the line containing `name` plus any immediately
    following indented lines.
    """
    section_re = re.compile(
        '^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
        re.IGNORECASE | re.MULTILINE)
    return [part.strip() for part in section_re.findall(source)]
def formal_usage(section):
    """Convert a "usage:" section into a single pattern expression."""
    _, _, section = section.partition(':')  # drop "usage:"
    words = section.split()
    program = words[0]
    # Each later occurrence of the program name starts a new alternative.
    alternatives = (') | (' if word == program else word
                    for word in words[1:])
    return '( ' + ' '.join(alternatives) + ' )'
def extras(help, version, options, doc):
    """Print the help text or version and exit, when the user asked.

    No-op unless the corresponding flag is both enabled and present
    (with a truthy value) in the parsed `options`.
    """
    help_requested = any(o.name in ('-h', '--help') and o.value
                         for o in options)
    if help and help_requested:
        print(doc.strip("\n"))
        sys.exit()
    version_requested = any(o.name == '--version' and o.value
                            for o in options)
    if version and version_requested:
        print(version)
        sys.exit()
class Dict(dict):
    """A dict whose repr lists its items sorted, one per line."""
    def __repr__(self):
        lines = ('%r: %r' % item for item in sorted(self.items()))
        return '{%s}' % ',\n '.join(lines)
def docopt(doc, argv=None, help=True, version=None, options_first=False):
    """Parse `argv` based on command-line interface described in `doc`.
    `docopt` creates your command-line interface based on its
    description that you pass as `doc`. Such description can contain
    --options, <positional-argument>, commands, which could be
    [optional], (required), (mutually | exclusive) or repeated...
    Parameters
    ----------
    doc : str
        Description of your command-line interface.
    argv : list of str, optional
        Argument vector to be parsed. sys.argv[1:] is used if not
        provided.
    help : bool (default: True)
        Set to False to disable automatic help on -h or --help
        options.
    version : any object
        If passed, the object will be printed if --version is in
        `argv`.
    options_first : bool (default: False)
        Set to True to require options precede positional arguments,
        i.e. to forbid options and positional arguments intermix.
    Returns
    -------
    args : dict
        A dictionary, where keys are names of command-line elements
        such as e.g. "--verbose" and "<path>", and values are the
        parsed values of those elements.
    Example
    -------
    >>> from docopt import docopt
    >>> doc = '''
    ... Usage:
    ...     my_program tcp <host> <port> [--timeout=<seconds>]
    ...     my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
    ...     my_program (-h | --help | --version)
    ...
    ... Options:
    ...     -h, --help  Show this screen and exit.
    ...     --baud=<n>  Baudrate [default: 9600]
    ... '''
    >>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
    >>> docopt(doc, argv)
    {'--baud': '9600',
     '--help': False,
     '--timeout': '30',
     '--version': False,
     '<host>': '127.0.0.1',
     '<port>': '80',
     'serial': False,
     'tcp': True}
    See also
    --------
    * For video introduction see http://docopt.org
    * Full documentation is available in README.rst as well as online
      at https://github.com/docopt/docopt#readme
    """
    argv = sys.argv[1:] if argv is None else argv
    # Exactly one "usage:" section is required; it defines the grammar.
    usage_sections = parse_section('usage:', doc)
    if len(usage_sections) == 0:
        raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
    if len(usage_sections) > 1:
        raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
    # Exit messages include the usage text from here on.
    DocoptExit.usage = usage_sections[0]
    options = parse_defaults(doc)
    pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
    # [default] syntax for argument is disabled
    #for a in pattern.flat(Argument):
    #    same_name = [d for d in arguments if d.name == a.name]
    #    if same_name:
    #        a.value = same_name[0].value
    argv = parse_argv(Tokens(argv), list(options), options_first)
    pattern_options = set(pattern.flat(Option))
    for options_shortcut in pattern.flat(OptionsShortcut):
        # Expand [options] to every documented option not already in
        # the pattern explicitly.
        doc_options = parse_defaults(doc)
        options_shortcut.children = list(set(doc_options) - pattern_options)
        #if any_options:
        #    options_shortcut.children += [Option(o.short, o.long, o.argcount)
        #                    for o in argv if type(o) is Option]
    # Handle -h/--help/--version before matching (may sys.exit()).
    extras(help, version, argv, doc)
    matched, left, collected = pattern.fix().match(argv)
    if matched and left == []:  # better error message if left?
        # Flatten the pattern tree plus collected repeats into a dict.
        return Dict((a.name, a.value) for a in (pattern.flat() + collected))
    raise DocoptExit()
| mit |
mattgiguere/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
    # dict input and equivalent (key, value) pair input must hash alike.
    h = FeatureHasher(n_features=16)
    assert_equal("dict", h.input_type)
    raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
    X1 = FeatureHasher(n_features=16).transform(raw_X)
    gen = (iter(d.items()) for d in raw_X)
    X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
    assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
    # mix byte and Unicode strings; note that "foo" is a duplicate in row 0
    raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
             ["bar".encode("ascii"), "baz", "quux"]]
    for lg_n_features in (7, 9, 11, 16, 22):
        n_features = 2 ** lg_n_features
        it = (x for x in raw_X)  # iterable
        h = FeatureHasher(n_features, non_negative=True, input_type="string")
        X = h.transform(it)
        assert_equal(X.shape[0], len(raw_X))
        assert_equal(X.shape[1], n_features)
        # non_negative=True: all stored values must be positive counts.
        assert_true(np.all(X.data > 0))
        assert_equal(X[0].sum(), 4)
        assert_equal(X[1].sum(), 3)
        assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
    # Pair input keeps the supplied feature values (up to hashing sign).
    raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
                                       {"baz": 3, "quux": 4, "foo": -1}])
    h = FeatureHasher(n_features=16, input_type="pair")
    x1, x2 = h.transform(raw_X).toarray()
    x1_nz = sorted(np.abs(x1[x1 != 0]))
    x2_nz = sorted(np.abs(x2[x2 != 0]))
    assert_equal([1, 2], x1_nz)
    assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
    # Empty samples (list/tuple/iterator) must produce all-zero rows.
    n_features = 16
    raw_X = [[], (), iter(range(0))]
    h = FeatureHasher(n_features=n_features, input_type="string")
    X = h.transform(raw_X)
    assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
    # Constructor and transform must reject malformed parameters/input.
    assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
    assert_raises(ValueError, FeatureHasher, n_features=-1)
    assert_raises(ValueError, FeatureHasher, n_features=0)
    assert_raises(TypeError, FeatureHasher, n_features='ham')
    h = FeatureHasher(n_features=np.uint16(2 ** 6))
    assert_raises(ValueError, h.transform, [])
    assert_raises(Exception, h.transform, [[5.5]])
    assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
    # Test delayed input validation in fit (useful for grid search).
    hasher = FeatureHasher()
    hasher.set_params(n_features=np.inf)
    assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
    # Assert that no zeros are materialized in the output.
    X = FeatureHasher().transform([{'foo': 0}])
    assert_equal(X.data.shape, (0,))
| bsd-3-clause |
nhr/openshift-ansible | roles/openshift_health_checker/test/openshift_check_test.py | 4 | 2699 | import pytest
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks import load_checks, get_var
# Fixtures
@pytest.fixture()
def task_vars():
    """Sample task_vars mapping used by the get_var tests below."""
    return dict(foo=42, bar=dict(baz="openshift"))
@pytest.fixture(params=[
    ("notfound",),
    ("multiple", "keys", "not", "in", "task_vars"),
])
def missing_keys(request):
    # Parametrized key paths guaranteed to be absent from task_vars.
    return request.param
# Tests
def test_OpenShiftCheck_init():
    """Constructor must accept execute_module under both spellings."""
    class TestCheck(OpenShiftCheck):
        name = "test_check"
        run = NotImplemented
    # initialization requires at least one argument (apart from self)
    with pytest.raises(TypeError) as excinfo:
        TestCheck()
    assert 'execute_module' in str(excinfo.value)
    assert 'module_executor' in str(excinfo.value)
    execute_module = object()
    # initialize with positional argument
    check = TestCheck(execute_module)
    # new recommended name
    assert check.execute_module == execute_module
    # deprecated attribute name
    assert check.module_executor == execute_module
    # initialize with keyword argument, recommended name
    check = TestCheck(execute_module=execute_module)
    # new recommended name
    assert check.execute_module == execute_module
    # deprecated attribute name
    assert check.module_executor == execute_module
    # initialize with keyword argument, deprecated name
    check = TestCheck(module_executor=execute_module)
    # new recommended name
    assert check.execute_module == execute_module
    # deprecated attribute name
    assert check.module_executor == execute_module
def test_subclasses():
    """OpenShiftCheck.subclasses should find all subclasses recursively."""
    class TestCheck1(OpenShiftCheck):
        pass
    class TestCheck2(OpenShiftCheck):
        pass
    class TestCheck1A(TestCheck1):
        pass
    local_subclasses = set([TestCheck1, TestCheck1A, TestCheck2])
    known_subclasses = set(OpenShiftCheck.subclasses())
    assert local_subclasses - known_subclasses == set(), "local_subclasses should be a subset of known_subclasses"
def test_load_checks():
    """Loading checks should load and return Python modules."""
    modules = load_checks()
    assert modules
@pytest.mark.parametrize("keys,expected", [
    (("foo",), 42),
    (("bar", "baz"), "openshift"),
])
def test_get_var_ok(task_vars, keys, expected):
    # Existing (possibly nested) keys resolve to their values.
    assert get_var(task_vars, *keys) == expected
def test_get_var_error(task_vars, missing_keys):
    # Missing keys raise when no default is supplied.
    with pytest.raises(OpenShiftCheckException):
        get_var(task_vars, *missing_keys)
def test_get_var_default(task_vars, missing_keys):
    # Missing keys fall back to the supplied default.
    default = object()
    assert get_var(task_vars, *missing_keys, default=default) == default
| apache-2.0 |
cesargtz/YecoraOdoo | addons/hr_timesheet/__init__.py | 410 | 1104 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Tatsh-ansible/ansible | lib/ansible/plugins/callback/hipchat.py | 43 | 6195 | # (C) 2014, Matt Martz <matt@sivel.net>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
try:
import prettytable
HAS_PRETTYTABLE = True
except ImportError:
HAS_PRETTYTABLE = False
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
class CallbackModule(CallbackBase):
    """This is an example ansible callback plugin that sends status
    updates to a HipChat channel during playbook execution.
    This plugin makes use of the following environment variables:
        HIPCHAT_TOKEN (required): HipChat API token
        HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible
        HIPCHAT_FROM (optional): Name to post as. Default: ansible
        HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true
    Requires:
        prettytable
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'hipchat'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()
        if not HAS_PRETTYTABLE:
            self.disabled = True
            self._display.warning('The `prettytable` python module is not installed. '
                                  'Disabling the HipChat callback plugin.')
        self.msg_uri = 'https://api.hipchat.com/v1/rooms/message'
        self.token = os.getenv('HIPCHAT_TOKEN')
        self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
        self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
        self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
        if self.token is None:
            self.disabled = True
            self._display.warning('HipChat token could not be loaded. The HipChat '
                                  'token can be provided using the `HIPCHAT_TOKEN` '
                                  'environment variable.')
        self.printed_playbook = False
        self.playbook_name = None
        self.play = None

    def send_msg(self, msg, msg_format='text', color='yellow', notify=False):
        """Method for sending a message to HipChat"""
        params = {}
        params['room_id'] = self.room
        params['from'] = self.from_name[:15]  # max length is 15
        params['message'] = msg
        params['message_format'] = msg_format
        params['color'] = color
        params['notify'] = int(self.allow_notify and notify)
        url = ('%s?auth_token=%s' % (self.msg_uri, self.token))
        try:
            response = open_url(url, data=urlencode(params))
            return response.read()
        except Exception:
            # BUG FIX: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt; notification failures are
            # best-effort, but only genuine errors should be ignored.
            self._display.warning('Could not submit message to hipchat')

    def v2_playbook_on_play_start(self, play):
        """Display Playbook and play start messages"""
        self.play = play
        name = play.name
        # This block sends information about a playbook when it starts
        # The playbook object is not immediately available at
        # playbook_on_start so we grab it via the play
        #
        # Displays info about playbook being started by a person on an
        # inventory, as well as Tags, Skip Tags and Limits
        if not self.printed_playbook:
            self.playbook_name, _ = os.path.splitext(
                os.path.basename(self.play.playbook.filename))
            host_list = self.play.playbook.inventory.host_list
            inventory = os.path.basename(os.path.realpath(host_list))
            self.send_msg("%s: Playbook initiated by %s against %s" %
                          (self.playbook_name,
                           self.play.playbook.remote_user,
                           inventory), notify=True)
            self.printed_playbook = True
            subset = self.play.playbook.inventory._subset
            skip_tags = self.play.playbook.skip_tags
            self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
                          (self.playbook_name,
                           ', '.join(self.play.playbook.only_tags),
                           ', '.join(skip_tags) if skip_tags else None,
                           ', '.join(subset) if subset else subset))
        # This is where we actually say we are starting a play
        self.send_msg("%s: Starting play: %s" %
                      (self.playbook_name, name))

    def playbook_on_stats(self, stats):
        """Display info about playbook statistics"""
        hosts = sorted(stats.processed.keys())
        t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
                                     'Failures'])
        failures = False
        unreachable = False
        for h in hosts:
            s = stats.summarize(h)
            if s['failures'] > 0:
                failures = True
            if s['unreachable'] > 0:
                unreachable = True
            t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
                                            'failures']])
        self.send_msg("%s: Playbook complete" % self.playbook_name,
                      notify=True)
        if failures or unreachable:
            color = 'red'
            self.send_msg("%s: Failures detected" % self.playbook_name,
                          color=color, notify=True)
        else:
            color = 'green'
        self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
| gpl-3.0 |
egabancho/invenio | invenio/legacy/bibcatalog/templates.py | 3 | 3374 | ## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibCatalog HTML generator."""
from invenio.legacy.bibcatalog.api import BIBCATALOG_SYSTEM
from invenio.base.i18n import wash_language, gettext_set_language
from invenio.config import CFG_SITE_LANG
from invenio.legacy.webstyle.templates import Template as DefaultTemplate
class Template(DefaultTemplate):
    """ HTML generators for BibCatalog """
    # How many tickets are listed on one page.
    SHOW_MAX_TICKETS = 25
    def tmpl_your_tickets(self, uid, ln=CFG_SITE_LANG, start=1):
        """Render an HTML listing of the tickets owned by *uid*, paginated
        in windows of SHOW_MAX_TICKETS starting at 1-based position *start*."""
        ln = wash_language(ln)
        _ = gettext_set_language(ln)
        if BIBCATALOG_SYSTEM is None:
            return _("Error: No BibCatalog system configured.")
        # Bail out early with the backend's own diagnostic, if any.
        problems = BIBCATALOG_SYSTEM.check_system(uid)
        if problems:
            return _("Error")+" "+problems
        tickets = BIBCATALOG_SYSTEM.ticket_search(uid, owner=uid) # get ticket id's
        out = []
        out.append((_("You have %(x_num)i tickets.", x_num=len(tickets))) + "<br/>")
        # "Previous" link when we are not on the first page.
        if start > 1:
            prevstart = max(start - self.SHOW_MAX_TICKETS, 1)
            out.append('<a href="/yourtickets/display?start='+str(prevstart)+'">'+_("Previous")+'</a>')
        out.append("""<table border="1">""")
        lastshown = len(tickets) # what was the number of the last shown ticket?
        for num, ticket in enumerate(tickets, 1):
            # fetch details only for tickets inside the current window
            if start <= num < start + self.SHOW_MAX_TICKETS:
                ticket_info = BIBCATALOG_SYSTEM.ticket_get_info(uid, ticket)
                subject = ticket_info['subject']
                status = ticket_info['status']
                text = ticket_info['text'] if 'text' in ticket_info else ""
                display = '<a href="'+ticket_info['url_display']+'">'+_("show")+'</a>'
                close = '<a href="'+ticket_info['url_close']+'">'+_("close")+'</a>'
                out.append("<tr><td>"+str(ticket)+"</td><td>"+subject+" "+text+"</td><td>"+status+"</td><td>"+display+"</td><td>"+close+"</td></tr>\n")
                lastshown = num
        out.append("</table>")
        # "Next" link when further tickets remain after the window.
        if len(tickets) > lastshown:
            out.append('<a href="/yourtickets/display?start='+str(lastshown+1)+'">'+_("Next")+'</a>')
        return "".join(out)
| gpl-2.0 |
NEricN/RobotCSimulator | Python/App/Lib/msilib/__init__.py | 37 | 17575 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
from _msi import *
import glob
import os
import re
import string
import sys
# Architecture of the running interpreter; selects the MSI template
# (Intel/Intel64/x64) and the 64-bit component attribute bit.
AMD64 = "AMD64" in sys.version
Itanium = "Itanium" in sys.version
Win64 = AMD64 or Itanium
# Bit layout of the column-type integers in the MSI _Columns table.
# Partially taken from Wine
datasizemask= 0x00ff        # low byte: declared size (CHAR(n), 2, 4, ...)
type_valid= 0x0100          # column definition is valid
type_localizable= 0x0200    # column content may be localized
typemask= 0x0c00            # mask selecting one of the four base types below
type_long= 0x0000
type_short= 0x0400
type_string= 0x0c00
type_binary= 0x0800
type_nullable= 0x1000       # column accepts NULL
type_key= 0x2000            # column is part of the primary key
# XXX temporary, localizable?
# All bits Table.sql() knows how to decode; anything else is reported.
knownbits = datasizemask | type_valid | type_localizable | \
            typemask | type_nullable | type_key
class Table:
    """Description of one MSI table: a name plus (index, name, type-bits)
    column triples, convertible to the CREATE TABLE statement MSI expects."""
    def __init__(self, name):
        self.name = name
        self.fields = []    # (1-based column index, column name, type bits)
    def add_field(self, index, name, type):
        # *type* is a combination of the type_*/datasizemask bits above.
        self.fields.append((index,name,type))
    def sql(self):
        """Return the CREATE TABLE statement for this table.

        Decodes each column's type bits into an SQL type plus NOT NULL /
        LOCALIZABLE flags, and collects key columns into the PRIMARY KEY.
        NOTE: uses Python 2 print statements to report undecodable bits."""
        fields = []    # dead initializer; overwritten by the [None]*n below
        keys = []
        self.fields.sort()
        # Columns are positioned by their declared index, not append order.
        fields = [None]*len(self.fields)
        for index, name, type in self.fields:
            index -= 1
            unk = type & ~knownbits
            if unk:
                print "%s.%s unknown bits %x" % (self.name, name, unk)
            size = type & datasizemask
            dtype = type & typemask
            if dtype == type_string:
                if size:
                    tname="CHAR(%d)" % size
                else:
                    tname="CHAR"
            elif dtype == type_short:
                assert size==2
                tname = "SHORT"
            elif dtype == type_long:
                assert size==4
                tname="LONG"
            elif dtype == type_binary:
                assert size==0
                tname="OBJECT"
            else:
                tname="unknown"
                print "%s.%sunknown integer type %d" % (self.name, name, size)
            if type & type_nullable:
                flags = ""
            else:
                flags = " NOT NULL"
            if type & type_localizable:
                flags += " LOCALIZABLE"
            fields[index] = "`%s` %s%s" % (name, tname, flags)
            if type & type_key:
                keys.append("`%s`" % name)
        fields = ", ".join(fields)
        keys = ", ".join(keys)
        return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)
    def create(self, db):
        # Execute the CREATE TABLE statement against an open MSI database.
        v = db.OpenView(self.sql())
        v.Execute(None)
        v.Close()
class _Unspecified:
    """Sentinel for "argument not supplied".

    None cannot serve as the sentinel because it is a meaningful value for
    a sequence condition."""
    pass

def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
    """Change the sequence number of an action in a sequence list.

    *seq* is a list of (action, condition, sequence) tuples, modified in
    place.  Fields left unspecified keep the action's current value.
    Raises ValueError if *action* does not occur in *seq*.
    """
    for i in range(len(seq)):
        if seq[i][0] == action:
            if cond is _Unspecified:
                cond = seq[i][1]
            if seqno is _Unspecified:
                seqno = seq[i][2]
            seq[i] = (action, cond, seqno)
            return
    # The call form of raise is valid in both Python 2 and 3, unlike the
    # old "raise ValueError, msg" statement form used previously.
    raise ValueError("Action not found in sequence")
def add_data(db, table, values):
    """Insert *values* (a sequence of row tuples) into *table* of MSI db *db*.

    Each row must have exactly as many fields as the table has columns.
    Integers, strings and None map to the obvious record setters; a Binary
    instance streams the named file into the column.
    NOTE: Python 2 only (uses long/basestring and py2 raise/except syntax)."""
    v = db.OpenView("SELECT * FROM `%s`" % table)
    count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount()
    r = CreateRecord(count)
    for value in values:
        assert len(value) == count, value
        for i in range(count):
            field = value[i]
            if isinstance(field, (int, long)):
                r.SetInteger(i+1,field)   # record fields are 1-based
            elif isinstance(field, basestring):
                r.SetString(i+1,field)
            elif field is None:
                pass    # leave the field NULL
            elif isinstance(field, Binary):
                r.SetStream(i+1, field.name)
            else:
                raise TypeError, "Unsupported type %s" % field.__class__.__name__
        try:
            v.Modify(MSIMODIFY_INSERT, r)
        except Exception, e:
            # NOTE(review): the original exception (and 'e') is discarded,
            # hiding the underlying _msi error cause.
            raise MSIError("Could not insert "+repr(values)+" into "+table)
        r.ClearData()   # reuse the record for the next row
    v.Close()
def add_stream(db, name, path):
    """Store the file at *path* in the database's _Streams table under *name*."""
    insert_view = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
    record = CreateRecord(1)
    record.SetStream(1, path)
    insert_view.Execute(record)
    insert_view.Close()
def init_database(name, schema,
                  ProductName, ProductCode, ProductVersion,
                  Manufacturer):
    """Create a fresh installer database *name* from *schema*.

    Removes any stale file first, instantiates the schema's tables and
    validation records, fills the summary-information stream and the
    Property table, then commits and returns the open database."""
    try:
        os.unlink(name)
    except OSError:
        pass    # no previous database to remove
    ProductCode = ProductCode.upper()
    # Create the database
    db = OpenDatabase(name, MSIDBOPEN_CREATE)
    # Create the tables
    for table in schema.tables:
        table.create(db)
    # Fill the validation table
    add_data(db, "_Validation", schema._Validation_records)
    # Initialize the summary information, allowing atmost 20 properties
    si = db.GetSummaryInformation(20)
    si.SetProperty(PID_TITLE, "Installation Database")
    si.SetProperty(PID_SUBJECT, ProductName)
    si.SetProperty(PID_AUTHOR, Manufacturer)
    # Pick the platform template matching this interpreter's architecture.
    if Itanium:
        template = "Intel64;1033"
    elif AMD64:
        template = "x64;1033"
    else:
        template = "Intel;1033"
    si.SetProperty(PID_TEMPLATE, template)
    si.SetProperty(PID_REVNUMBER, gen_uuid())
    si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media
    si.SetProperty(PID_PAGECOUNT, 200)
    si.SetProperty(PID_APPNAME, "Python MSI Library")
    # XXX more properties
    si.Persist()
    add_data(db, "Property", [
        ("ProductName", ProductName),
        ("ProductCode", ProductCode),
        ("ProductVersion", ProductVersion),
        ("Manufacturer", Manufacturer),
        ("ProductLanguage", "1033")])
    db.Commit()
    return db
def add_tables(db, module):
    """Insert every table defined in *module* into *db*.

    *module.tables* lists table names; each name is also an attribute of
    the module holding that table's row tuples."""
    for tablename in module.tables:
        add_data(db, tablename, getattr(module, tablename))
def make_id(str):
    """Mangle *str* into a legal MSI identifier.

    Every character outside [A-Za-z0-9._] becomes '_', and a leading digit
    or dot is shielded with a '_' prefix."""
    str = re.sub(r"[^A-Za-z0-9._]", "_", str)
    if str[0] in (string.digits + "."):
        str = "_" + str
    assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str
    return str
def gen_uuid():
    """Return a fresh GUID in the brace-wrapped uppercase form MSI expects."""
    return "{%s}" % UuidCreate().upper()
class CAB:
    """Accumulates (path, logical-id) pairs and builds the cabinet file
    that the MSI database embeds as a stream."""
    def __init__(self, name):
        self.name = name
        self.files = []           # (full path, logical id), in sequence order
        self.filenames = set()    # logical ids already allocated
        self.index = 0            # last sequence number handed out
    def gen_id(self, file):
        """Return a unique logical id for *file*, uniquified with .N suffixes."""
        logical = _logical = make_id(file)
        pos = 1
        while logical in self.filenames:
            logical = "%s.%d" % (_logical, pos)
            pos += 1
        self.filenames.add(logical)
        return logical
    def append(self, full, file, logical):
        """Queue *full* for the cabinet and return (sequence, logical id).

        NOTE(review): returns None for directories, which would break
        callers that unpack the pair — presumably directories are never
        passed; confirm at call sites."""
        if os.path.isdir(full):
            return
        if not logical:
            logical = self.gen_id(file)
        self.index += 1
        self.files.append((full, logical))
        return self.index, logical
    def commit(self, db):
        """Build the cabinet, register it in the Media table and stream it
        into *db*, then delete the temporary file and commit."""
        from tempfile import mktemp
        # NOTE(review): mktemp() is race-prone (the name may be reused
        # before FCICreate opens it); kept as-is to match _msi's need for
        # a plain pathname.
        filename = mktemp()
        FCICreate(filename, self.files)
        add_data(db, "Media",
                 [(1, self.index, None, "#"+self.name, None, None)])
        add_stream(db, self.name, filename)
        os.unlink(filename)
        db.Commit()
# Module-global registry of logical directory ids already handed out, so
# Directory instances get unique names across the whole build.
_directories = set()
class Directory:
    """One row of the Directory table.

    Tracks the current component, the short (8.3) names and File-table ids
    already used in this directory, and the cabinet receiving its files."""
    def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
        """Create a new directory in the Directory table. There is a current component
        at each point in time for the directory, which is either explicitly created
        through start_component, or implicitly when files are added for the first
        time. Files are added into the current component, and into the cab file.
        To create a directory, a base directory object needs to be specified (can be
        None), the path to the physical directory, and a logical directory name.
        Default specifies the DefaultDir slot in the directory table. componentflags
        specifies the default flags that new components get."""
        index = 1
        _logical = make_id(_logical)
        logical = _logical
        # Disambiguate against every Directory created so far.
        while logical in _directories:
            logical = "%s%d" % (_logical, index)
            index += 1
        _directories.add(logical)
        self.db = db
        self.cab = cab
        self.basedir = basedir
        self.physical = physical
        self.logical = logical
        self.component = None       # current component; see start_component()
        self.short_names = set()    # 8.3 names already handed out here
        self.ids = set()            # File-table ids already handed out here
        self.keyfiles = {}          # long file name -> File id of key files
        self.componentflags = componentflags
        if basedir:
            self.absolute = os.path.join(basedir.absolute, physical)
            blogical = basedir.logical
        else:
            self.absolute = physical
            blogical = None
        add_data(db, "Directory", [(logical, blogical, default)])
    def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
        """Add an entry to the Component table, and make this component the current for this
        directory. If no component name is given, the directory name is used. If no feature
        is given, the current feature is used. If no flags are given, the directory's default
        flags are used. If no keyfile is given, the KeyPath is left null in the Component
        table."""
        if flags is None:
            flags = self.componentflags
        if uuid is None:
            uuid = gen_uuid()
        else:
            uuid = uuid.upper()
        if component is None:
            component = self.logical
        self.component = component
        if Win64:
            flags |= 256    # msidbComponentAttributes64bit
        if keyfile:
            # BUGFIX: CAB.gen_id() takes a single filename argument; the
            # previous call gen_id(self.absolute, keyfile) raised TypeError
            # whenever a keyfile was supplied.
            keyid = self.cab.gen_id(keyfile)
            self.keyfiles[keyfile] = keyid
        else:
            keyid = None
        add_data(self.db, "Component",
                 [(component, uuid, self.logical, flags, None, keyid)])
        if feature is None:
            feature = current_feature
        add_data(self.db, "FeatureComponents",
                 [(feature.id, component)])
    def make_short(self, file):
        """Derive a unique MSI short (8.3) name for *file* in this directory."""
        oldfile = file
        file = file.replace('+', '_')
        file = ''.join(c for c in file if not c in ' "/\[]:;=,')
        parts = file.split(".")
        if len(parts) > 1:
            prefix = "".join(parts[:-1]).upper()
            suffix = parts[-1].upper()
            if not prefix:
                prefix = suffix
                suffix = None
        else:
            prefix = file.upper()
            suffix = None
        if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and (
                not suffix or len(suffix) <= 3):
            if suffix:
                file = prefix+"."+suffix
            else:
                file = prefix
        else:
            file = None
        if file is None or file in self.short_names:
            # Collision, or not representable as 8.3: fall back to PREFIX~N.SUF.
            prefix = prefix[:6]
            if suffix:
                suffix = suffix[:3]
            pos = 1
            while 1:
                if suffix:
                    file = "%s~%d.%s" % (prefix, pos, suffix)
                else:
                    file = "%s~%d" % (prefix, pos)
                if file not in self.short_names: break
                pos += 1
                assert pos < 10000
                if pos in (10, 100, 1000):
                    prefix = prefix[:-1]
        self.short_names.add(file)
        assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names
        return file
    def add_file(self, file, src=None, version=None, language=None):
        """Add a file to the current component of the directory, starting a new one
        if there is no current component. By default, the file name in the source
        and the file table will be identical. If the src file is specified, it is
        interpreted relative to the current directory. Optionally, a version and a
        language can be specified for the entry in the File table."""
        if not self.component:
            self.start_component(self.logical, current_feature, 0)
        if not src:
            # Allow relative paths for file if src is not specified
            src = file
            file = os.path.basename(file)
        absolute = os.path.join(self.absolute, src)
        assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
        if file in self.keyfiles:
            logical = self.keyfiles[file]
        else:
            logical = None
        sequence, logical = self.cab.append(absolute, file, logical)
        assert logical not in self.ids
        self.ids.add(logical)
        short = self.make_short(file)
        full = "%s|%s" % (short, file)
        filesize = os.stat(absolute).st_size
        # constants.msidbFileAttributesVital
        # Compressed omitted, since it is the database default
        # could add r/o, system, hidden
        attributes = 512
        add_data(self.db, "File",
                 [(logical, self.component, full, filesize, version,
                   language, attributes, sequence)])
        #if not version:
        #    # Add hash if the file is not versioned
        #    filehash = FileHash(absolute, 0)
        #    add_data(self.db, "MsiFileHash",
        #             [(logical, 0, filehash.IntegerData(1),
        #               filehash.IntegerData(2), filehash.IntegerData(3),
        #               filehash.IntegerData(4))])
        # Automatically remove .pyc/.pyo files on uninstall (2)
        # XXX: adding so many RemoveFile entries makes installer unbelievably
        # slow. So instead, we have to use wildcard remove entries
        if file.endswith(".py"):
            add_data(self.db, "RemoveFile",
                     [(logical+"c", self.component, "%sC|%sc" % (short, file),
                       self.logical, 2),
                      (logical+"o", self.component, "%sO|%so" % (short, file),
                       self.logical, 2)])
        return logical
    def glob(self, pattern, exclude = None):
        """Add a list of files to the current component as specified in the
        glob pattern. Individual files can be excluded in the exclude list.

        Requires the module-level "import glob"; without it this method
        raised NameError."""
        files = glob.glob1(self.absolute, pattern)
        for f in files:
            if exclude and f in exclude: continue
            self.add_file(f)
        return files
    def remove_pyc(self):
        "Remove .pyc/.pyo files on uninstall"
        add_data(self.db, "RemoveFile",
                 [(self.component+"c", self.component, "*.pyc", self.logical, 2),
                  (self.component+"o", self.component, "*.pyo", self.logical, 2)])
class Binary:
    """Marker wrapper: a value of this type tells add_data() to insert the
    named file as a stream rather than as a string or integer."""
    def __init__(self, fname):
        self.name = fname
    def __repr__(self):
        # Reconstructable expression relative to a 'dirname' variable.
        return 'msilib.Binary(os.path.join(dirname,"{0}"))'.format(self.name)
class Feature:
    """One row of the Feature table.

    Remembers only the feature id; call set_current() to make this the
    feature that newly started components get attached to."""
    def __init__(self, db, id, title, desc, display, level = 1,
                 parent=None, directory = None, attributes=0):
        self.id = id
        parent_id = parent.id if parent else None
        add_data(db, "Feature",
                 [(id, parent_id, title, desc, display,
                   level, directory, attributes)])
    def set_current(self):
        # Subsequent Directory.start_component() calls default to this feature.
        global current_feature
        current_feature = self
class Control:
    """Wrapper for a control on a dialog.

    Provides helpers that add rows keyed by (dialog name, control name) to
    the ControlEvent, EventMapping and ControlCondition tables."""
    def __init__(self, dlg, name):
        self.dlg = dlg
        self.name = name
    def _add(self, table, values):
        # Every helper inserts one row prefixed with (dialog, control).
        add_data(self.dlg.db, table, [(self.dlg.name, self.name) + values])
    def event(self, event, argument, condition = "1", ordering = None):
        self._add("ControlEvent", (event, argument, condition, ordering))
    def mapping(self, event, attribute):
        self._add("EventMapping", (event, attribute))
    def condition(self, action, condition):
        self._add("ControlCondition", (action, condition))
class RadioButtonGroup(Control):
    """A group of radio buttons bound to *property*.

    add() appends buttons as consecutive rows of the RadioButton table."""
    def __init__(self, dlg, name, property):
        self.dlg = dlg
        self.name = name
        self.property = property
        self.index = 1    # next 1-based button position within the group
    def add(self, name, x, y, w, h, text, value = None):
        if value is None:
            value = name
        row = (self.property, self.index, value,
               x, y, w, h, text, None)
        add_data(self.dlg.db, "RadioButton", [row])
        self.index += 1
class Dialog:
    """One row of the Dialog table, plus factory methods that create the
    dialog's controls and return Control/RadioButtonGroup wrappers."""
    def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
        self.db = db
        self.name = name
        self.x, self.y, self.w, self.h = x, y, w, h
        add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
    def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
        """Add a generic control row and return its wrapper."""
        add_data(self.db, "Control",
                 [(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
        return Control(self, name)
    def text(self, name, x, y, w, h, attr, text):
        # Static text: no property, successor or help.
        return self.control(name, "Text", x, y, w, h, attr, None, text, None, None)
    def bitmap(self, name, x, y, w, h, text):
        return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
    def line(self, name, x, y, w, h):
        return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
    def pushbutton(self, name, x, y, w, h, attr, text, next):
        return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
    def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
        # Inserted directly so the wrapper can be a RadioButtonGroup.
        add_data(self.db, "Control",
                 [(self.name, name, "RadioButtonGroup",
                   x, y, w, h, attr, prop, text, next, None)])
        return RadioButtonGroup(self, name, prop)
    def checkbox(self, name, x, y, w, h, attr, prop, text, next):
        return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
| apache-2.0 |
snikch/elasticsearch | dev-tools/get-bwc-version.py | 136 | 3149 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
'''
Downloads and extracts elasticsearch for backwards compatibility tests.
'''
import argparse
import os
import platform
import shutil
import subprocess
import urllib.request
import zipfile
def parse_config():
    """Parse command-line options; usage is described by the module docstring."""
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument('--path', metavar='DIR', default='./backwards',
                           help='Where to extract elasticsearch')
    argparser.add_argument('--force', action='store_true', default=False,
                           help='Delete and redownload if the version already exists')
    argparser.add_argument('version', metavar='X.Y.Z',
                           help='Version of elasticsearch to grab')
    return argparser.parse_args()
def main():
    """Download the requested elasticsearch release and unpack it in place."""
    opts = parse_config()
    if not os.path.exists(opts.path):
        print('Creating %s' % opts.path)
        os.mkdir(opts.path)
    is_windows = platform.system() == 'Windows'
    os.chdir(opts.path)
    version_dir = 'elasticsearch-%s' % opts.version
    if os.path.exists(version_dir):
        if not opts.force:
            # Already downloaded and caller did not ask to redo it.
            print('Version %s exists at %s' % (opts.version, version_dir))
            return
        print('Removing old download %s' % version_dir)
        shutil.rmtree(version_dir)
    # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts
    filename = version_dir + ('.zip' if is_windows else '.tar.gz')
    if opts.version == '1.2.0':
        # 1.2.0 was pulled from download.elasticsearch.org because of routing bug:
        url = 'http://central.maven.org/maven2/org/elasticsearch/elasticsearch/1.2.0/%s' % filename
    elif opts.version.startswith(('0.', '1.')):
        url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
    else:
        url = 'http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/%s/%s' % (opts.version, filename)
    print('Downloading %s' % url)
    urllib.request.urlretrieve(url, filename)
    print('Extracting to %s' % version_dir)
    if is_windows:
        zipfile.ZipFile(filename).extractall()
    else:
        # for some reason python's tarfile module has trouble with ES tgz?
        subprocess.check_call('tar -xzf %s' % filename, shell=True)
    print('Cleaning up %s' % filename)
    os.remove(filename)
# Script entry point: run main(), exiting quietly on Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Ctrl-C caught, exiting')
| apache-2.0 |
SocialProgrammingUnion/SamaritanAnalytics | SPU-Front_templates/SPU_BLOG/migrations/env.py | 557 | 2883 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a database URL and no Engine, so no
    DBAPI needs to be available; context.execute() emits the SQL to the
    script output instead of a live connection.
    """
    context.configure(url=config.get_main_option("sqlalchemy.url"))
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine and associates a live connection with the Alembic
    context before running the migrations.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)
    connection = engine.connect()
    try:
        # BUGFIX: configure() is inside the try block so the connection is
        # closed even when configuration fails (previously it leaked).
        context.configure(connection=connection,
                          target_metadata=target_metadata,
                          process_revision_directives=process_revision_directives,
                          **current_app.extensions['migrate'].configure_args)
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
# Dispatch on how Alembic invoked this env.py (offline SQL script vs. live DB).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| mit |
pamoakoy/invenio | modules/webcomment/lib/webcomment_webinterface.py | 1 | 41075 | # -*- coding: utf-8 -*-
## Comments and reviews for records.
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Comments and reviews for records: web interface """
__lastupdated__ = """$Date$"""
__revision__ = """$Id$"""
import cgi
from invenio.webcomment import check_recID_is_in_range, \
perform_request_display_comments_or_remarks, \
perform_request_add_comment_or_remark, \
perform_request_vote, \
perform_request_report, \
subscribe_user_to_discussion, \
unsubscribe_user_from_discussion, \
get_user_subscription_to_discussion, \
check_user_can_attach_file_to_comments, \
check_user_can_view_comments, \
check_user_can_send_comments, \
check_user_can_view_comment, \
query_get_comment
from invenio.config import \
CFG_TMPDIR, \
CFG_SITE_LANG, \
CFG_SITE_URL, \
CFG_PREFIX, \
CFG_WEBCOMMENT_ALLOW_COMMENTS,\
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS, \
CFG_SITE_RECORD
from invenio.webuser import getUid, page_not_authorized, isGuestUser, collect_user_info
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.search_engine import create_navtrail_links, \
guess_primary_collection_of_a_record, \
get_colID
from invenio.urlutils import redirect_to_url, \
make_canonical_urlargd
from invenio.htmlutils import get_mathjax_header
from invenio.errorlib import register_exception
from invenio.messages import gettext_set_language
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.websearchadminlib import get_detailed_page_tabs
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.access_control_mailcookie import \
mail_cookie_create_authorize_action, \
mail_cookie_create_common, \
mail_cookie_check_common, \
InvenioWebAccessMailCookieDeletedError, \
InvenioWebAccessMailCookieError
from invenio.webcomment_config import \
CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE, \
CFG_WEBCOMMENT_MAX_ATTACHED_FILES
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
websearch_templates = invenio.template.load('websearch')
try:
from invenio.fckeditor_invenio_connector import FCKeditorConnectorInvenio
fckeditor_available = True
except ImportError, e:
fckeditor_available = False
import os
from invenio import webinterface_handler_config as apache
from invenio.bibdocfile import \
stream_file, \
decompose_file, \
propose_next_docname
class WebInterfaceCommentsPages(WebInterfaceDirectory):
    """Defines the set of /comments pages."""
    # URL components served under this directory; WebInterfaceDirectory maps
    # each name to the method of the same name ('' falls back to __call__).
    _exports = ['', 'display', 'add', 'vote', 'report', 'index', 'attachments',
                'subscribe', 'unsubscribe']
    def __init__(self, recid=-1, reviews=0):
        # recid: id of the record the discussion belongs to.
        self.recid = recid
        self.discussion = reviews # 0:comments, 1:reviews
        # Sub-directory handling attached files for this record's comments.
        self.attachments = WebInterfaceCommentsFiles(recid, reviews)
def index(self, req, form):
"""
Redirects to display function
"""
return self.display(req, form)
    def display(self, req, form):
        """
        Display comments (reviews if enabled) associated with record having id recid where recid>0.
        This function can also be used to display remarks associated with basket having id recid where recid<-99.
        @param ln: language
        @param recid: record id, integer
        @param do: display order    hh = highest helpful score, review only
                                    lh = lowest helpful score, review only
                                    hs = highest star score, review only
                                    ls = lowest star score, review only
                                    od = oldest date
                                    nd = newest date
        @param ds: display since    all= no filtering by date
                                    nd = n days ago
                                    nw = n weeks ago
                                    nm = n months ago
                                    ny = n years ago
                                    where n is a single digit integer between 0 and 9
        @param nb: number of results per page
        @param p: results page
        @param voted: boolean, active if user voted for a review, see vote function
        @param reported: int, active if user reported a certain comment/review, see report function
        @param reviews: boolean, enabled for reviews, disabled for comments
        @param subscribed: int, 1 if user just subscribed to discussion, -1 if unsubscribed
        @return the full html page.
        """
        argd = wash_urlargd(form, {'do': (str, "od"),
                                   'ds': (str, "all"),
                                   'nb': (int, 100),
                                   'p': (int, 1),
                                   'voted': (int, -1),
                                   'reported': (int, -1),
                                   'subscribed': (int, 0),
                                   'cmtgrp': (list, ["latest"]) # 'latest' is now a reserved group/round name
                                   })
        _ = gettext_set_language(argd['ln'])
        uid = getUid(req)
        user_info = collect_user_info(req)
        (auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            # Guests lacking view rights are redirected to login with a mail
            # cookie recording the attempted action, so they return here.
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                     make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                                             CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target, norobot=True)
        elif auth_code:
            return page_not_authorized(req, "../", \
                text = auth_msg)
        can_send_comments = False
        (auth_code, auth_msg) = check_user_can_send_comments(user_info, self.recid)
        if not auth_code:
            can_send_comments = True
        # Only authenticated users may attach files, and only if authorized.
        can_attach_files = False
        (auth_code, auth_msg) = check_user_can_attach_file_to_comments(user_info, self.recid)
        if not auth_code and (user_info['email'] != 'guest'):
            can_attach_files = True
        # Subscription levels: 1 = subscribed and may unsubscribe;
        # 2 = subscribed (cannot unsubscribe); anything else = not subscribed.
        subscription = get_user_subscription_to_discussion(self.recid, uid)
        if subscription == 1:
            user_is_subscribed_to_discussion = True
            user_can_unsubscribe_from_discussion = True
        elif subscription == 2:
            user_is_subscribed_to_discussion = True
            user_can_unsubscribe_from_discussion = False
        else:
            user_is_subscribed_to_discussion = False
            user_can_unsubscribe_from_discussion = False
        #display_comment_rounds = [cmtgrp for cmtgrp in argd['cmtgrp'] if cmtgrp.isdigit() or cmtgrp == "all" or cmtgrp == "-1"]
        display_comment_rounds = argd['cmtgrp']
        check_warnings = []
        (ok, problem) = check_recID_is_in_range(self.recid, check_warnings, argd['ln'])
        if ok:
            (body, errors, warnings) = perform_request_display_comments_or_remarks(req=req, recID=self.recid,
                display_order=argd['do'],
                display_since=argd['ds'],
                nb_per_page=argd['nb'],
                page=argd['p'],
                ln=argd['ln'],
                voted=argd['voted'],
                reported=argd['reported'],
                subscribed=argd['subscribed'],
                reviews=self.discussion,
                uid=uid,
                can_send_comments=can_send_comments,
                can_attach_files=can_attach_files,
                user_is_subscribed_to_discussion=user_is_subscribed_to_discussion,
                user_can_unsubscribe_from_discussion=user_can_unsubscribe_from_discussion,
                display_comment_rounds=display_comment_rounds
                )
            # Build the detailed-record tab bar, sorted by each tab's 'order'.
            unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
                                                    self.recid,
                                                    ln=argd['ln'])
            ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
            ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
            link_ln = ''
            if argd['ln'] != CFG_SITE_LANG:
                link_ln = '?ln=%s' % argd['ln']
            tabs = [(unordered_tabs[tab_id]['label'], \
                     '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln), \
                     tab_id in ['comments', 'reviews'],
                     unordered_tabs[tab_id]['enabled']) \
                    for (tab_id, order) in ordered_tabs_id
                    if unordered_tabs[tab_id]['visible'] == True]
            top = webstyle_templates.detailed_record_container_top(self.recid,
                                                                   tabs,
                                                                   argd['ln'])
            bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                         tabs,
                                                                         argd['ln'])
            title, description, keywords = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])
            # Breadcrumb: collection > record title > Comments/Reviews.
            navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
            if navtrail:
                navtrail += ' > '
            navtrail += '<a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
            navtrail += title
            navtrail += '</a>'
            navtrail += ' > <a class="navtrail">%s</a>' % (self.discussion==1 and _("Reviews") or _("Comments"))
            mathjaxheader = ''
            if CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS:
                mathjaxheader = get_mathjax_header()
            jqueryheader = '''
            <script src="%(CFG_SITE_URL)s/js/jquery.min.js" type="text/javascript" language="javascript"></script>
            <script src="%(CFG_SITE_URL)s/js/jquery.MultiFile.pack.js" type="text/javascript" language="javascript"></script>
            ''' % {'CFG_SITE_URL': CFG_SITE_URL}
            # Assemble the final page: header + search frame + tabs + body.
            return pageheaderonly(title=title,
                                  navtrail=navtrail,
                                  uid=uid,
                                  verbose=1,
                                  metaheaderadd = mathjaxheader + jqueryheader,
                                  req=req,
                                  language=argd['ln'],
                                  navmenuid='search',
                                  navtrail_append_title_p=0) + \
                   websearch_templates.tmpl_search_pagestart(argd['ln']) + \
                   top + body + bottom + \
                   websearch_templates.tmpl_search_pageend(argd['ln']) + \
                   pagefooteronly(lastupdated=__lastupdated__, language=argd['ln'], req=req)
        else:
            return page(title=_("Record Not Found"),
                        body=problem,
                        uid=uid,
                        verbose=1,
                        req=req,
                        language=argd['ln'],
                        warnings=check_warnings, errors=[],
                        navmenuid='search')
    # Return the same page wether we ask for /CFG_SITE_RECORD/123 or /CFG_SITE_RECORD/123/
    __call__ = index
def add(self, req, form):
"""
Add a comment (review) to record with id recid where recid>0
Also works for adding a remark to basket with id recid where recid<-99
@param ln: languange
@param recid: record id
@param action: 'DISPLAY' to display add form
'SUBMIT' to submit comment once form is filled
'REPLY' to reply to an already existing comment
@param msg: the body of the comment/review or remark
@param score: star score of the review
@param note: title of the review
@param comid: comment id, needed for replying
@param editor_type: the type of editor used for submitting the
comment: 'textarea', 'fckeditor'.
@param subscribe: if set, subscribe user to receive email
notifications when new comment are added to
this discussion
@return the full html page.
"""
argd = wash_urlargd(form, {'action': (str, "DISPLAY"),
'msg': (str, ""),
'note': (str, ''),
'score': (int, 0),
'comid': (int, 0),
'editor_type': (str, ""),
'subscribe': (str, ""),
'cookie': (str, "")
})
_ = gettext_set_language(argd['ln'])
actions = ['DISPLAY', 'REPLY', 'SUBMIT']
uid = getUid(req)
# Is site ready to accept comments?
if uid == -1 or (not CFG_WEBCOMMENT_ALLOW_COMMENTS and not CFG_WEBCOMMENT_ALLOW_REVIEWS):
return page_not_authorized(req, "../comments/add",
navmenuid='search')
# Is user allowed to post comment?
user_info = collect_user_info(req)
(auth_code_1, auth_msg_1) = check_user_can_view_comments(user_info, self.recid)
(auth_code_2, auth_msg_2) = check_user_can_send_comments(user_info, self.recid)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
# Save user's value in cookie, so that these "POST"
# parameters are not lost during login process
msg_cookie = mail_cookie_create_common('comment_msg',
{'msg': argd['msg'],
'note': argd['note'],
'score': argd['score'],
'editor_type': argd['editor_type'],
'subscribe': argd['subscribe']},
onetime=True)
target = '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_URL + user_info['uri'] + '&cookie=' + msg_cookie}, {})
return redirect_to_url(req, target, norobot=True)
elif (auth_code_1 or auth_code_2):
return page_not_authorized(req, "../", \
text = auth_msg_1 + auth_msg_2)
user_info = collect_user_info(req)
can_attach_files = False
(auth_code, auth_msg) = check_user_can_attach_file_to_comments(user_info, self.recid)
if not auth_code and (user_info['email'] != 'guest'):
can_attach_files = True
warning_msgs = []
added_files = {}
if can_attach_files:
# User is allowed to attach files. Process the files
file_too_big = False
formfields = form.get('commentattachment[]', [])
if not hasattr(formfields, "__getitem__"): # A single file was uploaded
formfields = [formfields]
for formfield in formfields[:CFG_WEBCOMMENT_MAX_ATTACHED_FILES]:
if hasattr(formfield, "filename") and formfield.filename:
filename = formfield.filename
dir_to_open = os.path.join(CFG_TMPDIR, 'webcomment', str(uid))
try:
assert(dir_to_open.startswith(CFG_TMPDIR))
except AssertionError:
register_exception(req=req,
prefix='User #%s tried to upload file to forbidden location: %s' \
% (uid, dir_to_open))
if not os.path.exists(dir_to_open):
try:
os.makedirs(dir_to_open)
except:
register_exception(req=req, alert_admin=True)
## Before saving the file to disc, wash the filename (in particular
## washing away UNIX and Windows (e.g. DFS) paths):
filename = os.path.basename(filename.split('\\')[-1])
filename = filename.strip()
if filename != "":
# Check that file does not already exist
n = 1
while os.path.exists(os.path.join(dir_to_open, filename)):
basedir, name, extension = decompose_file(filename)
new_name = propose_next_docname(name)
filename = new_name + extension
fp = open(os.path.join(dir_to_open, filename), "w")
# FIXME: temporary, waiting for wsgi handler to be
# fixed. Once done, read chunk by chunk
## while formfield.file:
## fp.write(formfield.file.read(10240))
fp.write(formfield.file.read())
fp.close()
# Isn't this file too big?
file_size = os.path.getsize(os.path.join(dir_to_open, filename))
if CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE > 0 and \
file_size > CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE:
os.remove(os.path.join(dir_to_open, filename))
# One file is too big: record that,
# dismiss all uploaded files and re-ask to
# upload again
file_too_big = True
warning_msgs.append(('WRN_WEBCOMMENT_MAX_FILE_SIZE_REACHED', cgi.escape(filename), str(file_size/1024) + 'KB', str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/1024) + 'KB'))
else:
added_files[filename] = os.path.join(dir_to_open, filename)
if file_too_big:
# One file was too big. Removed all uploaded filed
for filepath in added_files.items():
try:
os.remove(filepath)
except:
# File was already removed or does not exist?
pass
client_ip_address = req.remote_ip
check_warnings = []
(ok, problem) = check_recID_is_in_range(self.recid, check_warnings, argd['ln'])
if ok:
title, description, keywords = websearch_templates.tmpl_record_page_header_content(req,
self.recid,
argd['ln'])
navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid))
if navtrail:
navtrail += ' > '
navtrail += '<a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
navtrail += title
navtrail += '</a>'
navtrail += '> <a class="navtrail" href="%s/%s/%s/%s/?ln=%s">%s</a>' % (CFG_SITE_URL,
CFG_SITE_RECORD,
self.recid,
self.discussion==1 and 'reviews' or 'comments',
argd['ln'],
self.discussion==1 and _('Reviews') or _('Comments'))
if argd['action'] not in actions:
argd['action'] = 'DISPLAY'
if not argd['msg']:
# User had to login in-between, so retrieve msg
# from cookie
try:
(kind, cookie_argd) = mail_cookie_check_common(argd['cookie'],
delete=True)
argd.update(cookie_argd)
except InvenioWebAccessMailCookieDeletedError, e:
return redirect_to_url(req, CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(self.recid) + (self.discussion==1 and \
'/reviews' or '/comments'))
except InvenioWebAccessMailCookieError, e:
# Invalid or empty cookie: continue
pass
subscribe = False
if argd['subscribe'] and \
get_user_subscription_to_discussion(self.recid, uid) == 0:
# User is not already subscribed, and asked to subscribe
subscribe = True
(body, errors, warnings) = perform_request_add_comment_or_remark(recID=self.recid,
ln=argd['ln'],
uid=uid,
action=argd['action'],
msg=argd['msg'],
note=argd['note'],
score=argd['score'],
reviews=self.discussion,
comID=argd['comid'],
client_ip_address=client_ip_address,
editor_type=argd['editor_type'],
can_attach_files=can_attach_files,
subscribe=subscribe,
req=req,
attached_files=added_files,
warnings=warning_msgs)
if self.discussion:
title = _("Add Review")
else:
title = _("Add Comment")
jqueryheader = '''
<script src="%(CFG_SITE_URL)s/js/jquery.min.js" type="text/javascript" language="javascript"></script>
<script src="%(CFG_SITE_URL)s/js/jquery.MultiFile.pack.js" type="text/javascript" language="javascript"></script>
''' % {'CFG_SITE_URL': CFG_SITE_URL}
return page(title=title,
body=body,
navtrail=navtrail,
uid=uid,
language=CFG_SITE_LANG,
verbose=1,
errors=errors,
warnings=warnings,
req=req,
navmenuid='search',
metaheaderadd=jqueryheader)
# id not in range
else:
return page(title=_("Record Not Found"),
body=problem,
uid=uid,
verbose=1,
req=req,
warnings=check_warnings, errors=[],
navmenuid='search')
def vote(self, req, form):
"""
Vote positively or negatively for a comment/review.
@param comid: comment/review id
@param com_value: +1 to vote positively
-1 to vote negatively
@param recid: the id of the record the comment/review is associated with
@param ln: language
@param do: display order hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param ds: display since all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
@param nb: number of results per page
@param p: results page
@param referer: http address of the calling function to redirect to (refresh)
@param reviews: boolean, enabled for reviews, disabled for comments
"""
argd = wash_urlargd(form, {'comid': (int, -1),
'com_value': (int, 0),
'recid': (int, -1),
'do': (str, "od"),
'ds': (str, "all"),
'nb': (int, 100),
'p': (int, 1),
'referer': (str, None)
})
client_ip_address = req.remote_ip
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if auth_code and user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
success = perform_request_vote(argd['comid'], client_ip_address, argd['com_value'], uid)
if argd['referer']:
argd['referer'] += "?ln=%s&do=%s&ds=%s&nb=%s&p=%s&voted=%s&" % (
argd['ln'], argd['do'], argd['ds'], argd['nb'], argd['p'], success)
redirect_to_url(req, argd['referer'])
else:
#Note: sent to comments display
referer = "%s/%s/%s/%s?&ln=%s&voted=1"
referer %= (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, self.discussion == 1 and 'reviews' or 'comments', argd['ln'])
redirect_to_url(req, referer)
def report(self, req, form):
"""
Report a comment/review for inappropriate content
@param comid: comment/review id
@param recid: the id of the record the comment/review is associated with
@param ln: language
@param do: display order hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param ds: display since all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
@param nb: number of results per page
@param p: results page
@param referer: http address of the calling function to redirect to (refresh)
@param reviews: boolean, enabled for reviews, disabled for comments
"""
argd = wash_urlargd(form, {'comid': (int, -1),
'recid': (int, -1),
'do': (str, "od"),
'ds': (str, "all"),
'nb': (int, 100),
'p': (int, 1),
'referer': (str, None)
})
client_ip_address = req.remote_ip
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if auth_code or user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
success = perform_request_report(argd['comid'], client_ip_address, uid)
if argd['referer']:
argd['referer'] += "?ln=%s&do=%s&ds=%s&nb=%s&p=%s&reported=%s&" % (argd['ln'], argd['do'], argd['ds'], argd['nb'], argd['p'], str(success))
redirect_to_url(req, argd['referer'])
else:
#Note: sent to comments display
referer = "%s/%s/%s/%s/display?ln=%s&voted=1"
referer %= (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, self.discussion==1 and 'reviews' or 'comments', argd['ln'])
redirect_to_url(req, referer)
def subscribe(self, req, form):
"""
Subscribe current user to receive email notification when new
comments are added to current discussion.
"""
argd = wash_urlargd(form, {'referer': (str, None)})
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
success = subscribe_user_to_discussion(self.recid, uid)
display_url = "%s/%s/%s/comments/display?subscribed=%s&ln=%s" % \
(CFG_SITE_URL, CFG_SITE_RECORD, self.recid, str(success), argd['ln'])
redirect_to_url(req, display_url)
def unsubscribe(self, req, form):
"""
Unsubscribe current user from current discussion.
"""
argd = wash_urlargd(form, {'referer': (str, None)})
user_info = collect_user_info(req)
uid = getUid(req)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
success = unsubscribe_user_from_discussion(self.recid, uid)
display_url = "%s/%s/%s/comments/display?subscribed=%s&ln=%s" % \
(CFG_SITE_URL, CFG_SITE_RECORD, self.recid, str(-success), argd['ln'])
redirect_to_url(req, display_url)
class WebInterfaceCommentsFiles(WebInterfaceDirectory):
    """Handle access to files attached to comments.

    Upload used to be available through the FCKeditor integration; only
    read access ('get') remains.
    """
    #_exports = ['put'] # 'get' is handled by _lookup(..)

    def __init__(self, recid=-1, reviews=0):
        # recid: record the comment attachments belong to
        # reviews: 0 for comments, 1 for reviews
        self.recid = recid
        self.discussion = reviews # 0:comments, 1:reviews

    def _lookup(self, component, path):
        """ This handler is invoked for the dynamic URLs (for getting
        attachments) Eg:
        CFG_SITE_URL/CFG_SITE_RECORD/5953/comments/attachments/get/652/myfile.pdf
        """
        if component == 'get' and len(path) > 1:
            comid = path[0] # comment ID
            file_name = '/'.join(path[1:]) # the filename

            def answer_get(req, form):
                """Accessing files attached to comments."""
                form['file'] = file_name
                form['comid'] = comid
                return self._get(req, form)

            return answer_get, []

        # All other cases: file not found
        return None, []

    def _get(self, req, form):
        """
        Returns a file attached to a comment.

        Example:
        CFG_SITE_URL/CFG_SITE_RECORD/5953/comments/attachments/get/652/myfile.pdf
        where 652 is the comment ID
        """
        argd = wash_urlargd(form, {'file': (str, None),
                                   'comid': (int, 0)})
        _ = gettext_set_language(argd['ln'])
        # Can user view this record, i.e. can user access its
        # attachments?
        uid = getUid(req)
        user_info = collect_user_info(req)

        # Check that user can view record, and its comments (protected
        # with action "viewcomment")
        (auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                                        CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target, norobot=True)
        elif auth_code:
            return page_not_authorized(req, "../", \
                                       text = auth_msg)

        # Does comment exist?
        if not query_get_comment(argd['comid']):
            req.status = apache.HTTP_NOT_FOUND
            return page(title=_("Page Not Found"),
                        body=_('The requested comment could not be found'),
                        req=req)

        # Check that user can view this particular comment, protected
        # using its own restriction
        (auth_code, auth_msg) = check_user_can_view_comment(user_info, argd['comid'])
        if auth_code and user_info['email'] == 'guest':
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                                        CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target)
        elif auth_code:
            return page_not_authorized(req, "../", \
                                       text = auth_msg,
                                       ln=argd['ln'])

        if not argd['file'] is None:
            # Prepare path to file on disk. Normalize the path so that
            # ../ and other dangerous components are removed.
            path = os.path.abspath(CFG_PREFIX + '/var/data/comments/' + \
                                   str(self.recid) + '/' + str(argd['comid']) + \
                                   '/' + argd['file'])

            # Check that we are really accessing the attachments
            # directory for the declared record.
            # BUGFIX: the prefix must end with '/'.  A bare
            # startswith(... + str(self.recid)) also matched sibling
            # directories sharing the same numeric prefix (e.g. recid 12
            # vs 123), letting a crafted 'file' argument escape via '../'.
            if path.startswith(CFG_PREFIX + '/var/data/comments/' + \
                               str(self.recid) + '/') and \
                   os.path.exists(path):
                return stream_file(req, path)

        # Send error 404 in all other cases
        req.status = apache.HTTP_NOT_FOUND
        return page(title=_("Page Not Found"),
                    body=_('The requested file could not be found'),
                    req=req,
                    language=argd['ln'])

    # NOTE: a commented-out FCKeditor-based 'put' upload handler used to
    # live here; it was dead code (see VCS history for the implementation).
| gpl-2.0 |
sunqb/oa_qian | flask/Lib/site-packages/wheel/signatures/__init__.py | 565 | 3779 | """
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
    """Lazily import an Ed25519 backend, self-test it, and cache it."""
    global ed25519ll
    if ed25519ll:
        return ed25519ll
    try:
        import ed25519ll  # C-accelerated backend (thousands of sigs / s)
    except (ImportError, OSError): # pragma nocover
        from . import ed25519py as ed25519ll  # pure Python (hundreds / s)
    test()
    return ed25519ll
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()
    jwk = {
        "kty": ALG, # alg -> kty in jwk-08.
        "vk": native(urlsafe_b64encode(keypair.vk)),
    }
    protected = {"alg": ALG, "jwk": jwk}
    # Canonical (sorted-key) JSON so the signed bytes are reproducible.
    enc_header = urlsafe_b64encode(binary(json.dumps(protected, sort_keys=True)))
    enc_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
    signing_input = b".".join((enc_header, enc_payload))
    signed = ed25519ll.crypto_sign(signing_input, keypair.sk)
    enc_signature = urlsafe_b64encode(signed[:ed25519ll.SIGNATUREBYTES])
    return {
        "recipients": [
            {"header": native(enc_header),
             "signature": native(enc_signature)},
        ],
        "payload": native(enc_payload),
    }
def assertTrue(condition, message=""):
    """Raise ValueError(message) when *condition* is falsy (assert helper)."""
    if condition:
        return
    raise ValueError(message)
def verify(jwsjs):
    """Return (decoded headers, payload) if all signatures in jwsjs are
    consistent, else raise ValueError.

    Caller must decide whether the keys are actually trusted."""
    get_ed25519ll()
    # XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
    recipients = jwsjs["recipients"]
    encoded_payload = binary(jwsjs["payload"])
    headers = []
    for recipient in recipients:
        # Each recipient entry must be exactly {"header": ..., "signature": ...}.
        assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
        h = binary(recipient["header"])
        s = binary(recipient["signature"])
        header = json.loads(native(urlsafe_b64decode(h)))
        assertTrue(header["alg"] == ALG,
                   "Unexpected algorithm {0}".format(header["alg"]))
        if "alg" in header["jwk"] and not "kty" in header["jwk"]:
            header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
        assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
                   "Unexpected key type {0}".format(header["jwk"]["kty"]))
        vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
        # Reconstruct the signed input "header.payload" and verify
        # signature||message with the recipient's public key.
        secured_input = b".".join((h, encoded_payload))
        sig = urlsafe_b64decode(s)
        sig_msg = sig+secured_input
        verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
        verified_header, verified_payload = verified_input.split('.')
        verified_header = binary(verified_header)
        decoded_header = native(urlsafe_b64decode(verified_header))
        headers.append(json.loads(decoded_header))

    verified_payload = binary(verified_payload)

    # only return header, payload that have passed through the crypto library.
    payload = json.loads(native(urlsafe_b64decode(verified_payload)))

    return headers, payload
def test():
    """Round-trip self-test: sign a payload, verify it, then check that a
    corrupted payload is rejected."""
    keypair = ed25519ll.crypto_sign_keypair()
    message = {'test': 'onstartup'}
    # Serialize and re-parse to mimic a signature that went over the wire.
    jwsjs = json.loads(json.dumps(sign(message, keypair)))
    verify(jwsjs)
    jwsjs['payload'] += 'x'  # corrupt the payload
    try:
        verify(jwsjs)
    except ValueError:
        pass
    else: # pragma no cover
        raise RuntimeError("No error from bad wheel.signatures payload.")
| apache-2.0 |
Comunitea/OCB | addons/account_anglo_saxon/stock.py | 57 | 3489 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class stock_move(osv.Model):
    _inherit = "stock.move"

    def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
        """ Add a reference to the stock.move in the invoice line
            In anglo-saxon the price for COGS should be taken from stock.move
            if possible (fallback on standard_price)
        """
        vals = super(stock_move, self)._get_invoice_line_vals(
            cr, uid, move, partner, inv_type, context=context)
        # Link the invoice line back to the originating stock move.
        vals['move_id'] = move.id
        return vals
class stock_picking(osv.osv):
    _inherit = "stock.picking"
    _description = "Picking List"

    def action_invoice_create(self, cr, uid, ids, journal_id=False,
                              group=False, type='out_invoice', context=None):
        '''Return ids of created invoices for the pickings.

        Anglo-saxon accounting: supplier invoice lines for non-service
        products must be posted to the stock input account instead of the
        default expense account; this override remaps those accounts after
        the standard invoices have been created.
        '''
        res = super(stock_picking,self).action_invoice_create(cr, uid, ids, journal_id, group, type, context=context)
        if type in ('in_invoice', 'in_refund'):
            for inv in self.pool.get('account.invoice').browse(cr, uid, res, context=context):
                for ol in inv.invoice_line:
                    if ol.product_id.type != 'service':
                        # Prefer the product-level stock input account,
                        # falling back to the product category's.
                        oa = ol.product_id.property_stock_account_input and ol.product_id.property_stock_account_input.id
                        if not oa:
                            oa = ol.product_id.categ_id.property_stock_account_input_categ and ol.product_id.categ_id.property_stock_account_input_categ.id
                        if oa:
                            # Map the account through the invoice's fiscal
                            # position, if one is set.
                            fpos = ol.invoice_id.fiscal_position or False
                            a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
                            # Taxes without a dedicated collected account
                            # follow the invoice line onto the new account.
                            tax_line = ol.invoice_line_tax_id.filtered(lambda l: not l.account_collected_id) if ol.invoice_line_tax_id else False
                            if tax_line:
                                for tax in tax_line:
                                    tax_id = self.pool['account.invoice.tax'].search(cr, uid, [('invoice_id', '=', ol.invoice_id.id), ('name', '=', tax.name), ('account_id', '=', ol.account_id.id)], limit=1)
                                    self.pool['account.invoice.tax'].write(cr, uid, tax_id, {'account_id': a})
                            self.pool.get('account.invoice.line').write(cr, uid, [ol.id], {'account_id': a})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/IO/Parallel/Testing/Python/TestPImageWriter.py | 26 | 1106 | #!/usr/bin/env python
# Image pipeline
image1 = vtk.vtkTIFFReader()
image1.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
image1.Update()
#
# If the current directory is writable, then test the witers
#
if (catch.catch(globals(),"""channel = open(test.tmp, w)""") == 0):
channel.close()
file.delete("-force", test.tmp)
piw = vtk.vtkPImageWriter()
piw.SetInputConnection(image1.GetOutputPort())
piw.SetFileName(piw.raw)
piw.SetMemoryLimit(1)
piw.Write()
file.delete("-force", piw.raw)
pass
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(image1.GetOutputPort())
viewer.SetColorWindow(255)
viewer.SetColorLevel(127.5)
viewer.Render()
# --- end of script --
| gpl-3.0 |
juvham/Cactus | cactus/tests/test_compat.py | 9 | 1359 | #coding:utf-8
from cactus.tests import SiteTestCase
class TestCompatibility(SiteTestCase):
    """Verify that deprecated dict-style path accessors still work."""

    def _paths_key_exists(self, obj, old_key):
        # Fail the test (rather than error) when a legacy key is gone.
        try:
            obj.paths[old_key]
        except KeyError:
            self.fail('Old key does not exist anymore: {0}'.format(old_key))

    def test_compatibility(self):
        """
        Test that we can access the path elements the "old" way
        Just try a few of them, we're not locking this down - just testing
        """
        for old_key, new_field in (
            ('build', 'build_path'),
            ('pages', 'page_path'),
        ):
            self.assertEqual(self.site.paths[old_key], getattr(self.site, new_field))

    def test_site_paths_keys_exist(self):
        for old_key in ('build', 'pages', 'plugins', 'templates', 'static', 'script'):
            self._paths_key_exists(self.site, old_key)

    def test_page_paths_keys_exist(self):
        page = self.site.pages()[0]
        for old_key in ('full', 'full-build'):
            self._paths_key_exists(page, old_key)

    def test_page_path_attr(self):
        page = self.site.pages()[0]
        self.assertEqual(page.source_path, page.path)

    def test_static_paths_keys_exist(self):
        # BUGFIX: this method was named test_page_paths_keys_exist, which
        # duplicated (and silently shadowed) the method above, so the
        # page-paths assertions never ran.
        static = self.site.static()[0]
        for old_key in ('full', 'full-build'):
            self._paths_key_exists(static, old_key)
storborg/pyweaving | pyweaving/__init__.py | 1 | 19858 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import json
from copy import deepcopy
from collections import defaultdict
__version__ = '0.0.8.dev'
class Color(object):
    """
    A color type. Internally stored as an RGB tuple; does not support
    transparency.
    """
    def __init__(self, rgb):
        # Accept any 3-item sequence and normalize to a tuple.
        if not isinstance(rgb, tuple):
            rgb = tuple(rgb)
        self.rgb = rgb

    def __eq__(self, other):
        # Return NotImplemented (rather than raising AttributeError) for
        # non-Color operands, per the data-model protocol.
        if not isinstance(other, Color):
            return NotImplemented
        return self.rgb == other.rgb

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # BUGFIX: defining __eq__ without __hash__ makes instances
        # unhashable on Python 3; hash consistently with equality.
        return hash(self.rgb)

    @property
    def css(self):
        # CSS functional notation, e.g. 'rgb(255, 0, 0)'.
        return 'rgb(%d, %d, %d)' % self.rgb

    def __str__(self):
        return str(self.rgb)
class WarpThread(object):
    """
    Represents a single warp thread.

    color may be a Color, a raw RGB sequence (coerced to Color), or None.
    shaft is the Shaft object this thread is heddled on (or None).
    """
    def __init__(self, color=None, shaft=None):
        # BUGFIX: test identity against None instead of truthiness, so a
        # falsy-but-valid color value is still coerced to Color.
        if color is not None and not isinstance(color, Color):
            color = Color(color)
        self.color = color
        self.shaft = shaft

    def __repr__(self):
        # BUGFIX: the old repr accessed self.color.rgb unconditionally and
        # raised AttributeError when color was None (the default).
        rgb = self.color.rgb if self.color is not None else None
        return '<WarpThread color:%s shaft:%s>' % (rgb, self.shaft)
class WeftThread(object):
    """
    Represents a single weft thread (one pick).

    Exactly one of shafts (liftplan mode) or treadles may be supplied.
    """
    def __init__(self, color=None, shafts=None, treadles=None):
        # BUGFIX: test identity against None instead of truthiness, so a
        # falsy-but-valid color value is still coerced to Color.
        if color is not None and not isinstance(color, Color):
            color = Color(color)
        self.color = color
        assert not (shafts and treadles), \
            "can't have both shafts (liftplan) and treadles specified"
        self.treadles = treadles or set()
        self.shafts = shafts or set()

    @property
    def connected_shafts(self):
        # The shafts engaged by this pick: either given directly (liftplan)
        # or the union of the shafts tied to the pressed treadles.
        if self.shafts:
            return self.shafts
        else:
            assert self.treadles
            ret = set()
            for treadle in self.treadles:
                ret.update(treadle.shafts)
            return ret

    def __repr__(self):
        # BUGFIX: the old repr accessed self.color.rgb unconditionally and
        # raised AttributeError when color was None (the default).
        rgb = self.color.rgb if self.color is not None else None
        if self.treadles:
            return '<WeftThread color:%s treadles:%s>' % (rgb, self.treadles)
        else:
            return '<WeftThread color:%s shafts:%s>' % (rgb, self.shafts)
class Shaft(object):
    """
    Represents a single shaft of the loom.

    Shafts carry no state of their own; Draft relates warp threads and
    treadles to shafts purely by object identity / list position.
    """
    pass
class Treadle(object):
    """
    Represents a single treadle of the loom.

    A treadle engages the set of shafts tied to it (the tie-up).
    """
    def __init__(self, shafts=None):
        # A falsy/omitted tie-up becomes a fresh empty set.
        self.shafts = set() if not shafts else shafts
class DraftError(Exception):
    """Raised for invalid or inconsistent weaving-draft operations."""
    pass
class Draft(object):
"""
The core representation of a weaving draft.
"""
def __init__(self, num_shafts, num_treadles=0, liftplan=False,
rising_shed=True, start_at_lowest_thread=True,
date=None, title='', author='', address='',
email='', telephone='', fax='', notes=''):
self.liftplan = liftplan or (num_treadles == 0)
self.rising_shed = rising_shed
self.start_at_lowest_thread = start_at_lowest_thread
self.shafts = []
for __ in range(num_shafts):
self.shafts.append(Shaft())
self.treadles = []
for __ in range(num_treadles):
self.treadles.append(Treadle())
self.warp = []
self.weft = []
self.date = date or datetime.date.today().strftime('%b %d, %Y')
self.title = title
self.author = author
self.address = address
self.email = email
self.telephone = telephone
self.fax = fax
self.notes = notes
    @classmethod
    def from_json(cls, s):
        """
        Construct a new Draft instance from its JSON representation.
        Counterpart to ``.to_json()``.

        Shafts and treadles are stored in the JSON by index and are
        resolved back to the new draft's Shaft/Treadle objects here.
        """
        obj = json.loads(s)
        # Pop the parts that need object resolution; what remains matches
        # __init__'s keyword arguments.
        warp = obj.pop('warp')
        weft = obj.pop('weft')
        tieup = obj.pop('tieup')

        draft = cls(**obj)

        for thread_obj in warp:
            draft.add_warp_thread(
                color=thread_obj['color'],
                shaft=draft.shafts[thread_obj['shaft']],
            )

        for thread_obj in weft:
            draft.add_weft_thread(
                color=thread_obj['color'],
                shafts=set(draft.shafts[n] for n in thread_obj['shafts']),
                treadles=set(draft.treadles[n] for n in
                             thread_obj['treadles']),
            )

        # Rebuild the tie-up: which shafts each treadle engages.
        for ii, shaft_nos in enumerate(tieup):
            draft.treadles[ii].shafts = set(draft.shafts[n] for n in shaft_nos)

        return draft
    def to_json(self):
        """
        Serialize a Draft to its JSON representation. Counterpart to
        ``.from_json()``.

        Shafts and treadles are stored by index into this draft's own
        ``shafts``/``treadles`` lists.
        """
        return json.dumps({
            'liftplan': self.liftplan,
            'rising_shed': self.rising_shed,
            'num_shafts': len(self.shafts),
            'num_treadles': len(self.treadles),
            'warp': [{
                'color': thread.color.rgb,
                'shaft': self.shafts.index(thread.shaft),
            } for thread in self.warp],
            'weft': [{
                'color': thread.color.rgb,
                'treadles': [self.treadles.index(tr)
                             for tr in thread.treadles],
                'shafts': [self.shafts.index(sh)
                           for sh in thread.connected_shafts],
            } for thread in self.weft],
            # Tie-up: for each treadle, the indexes of the shafts it engages.
            'tieup': [
                [self.shafts.index(sh) for sh in treadle.shafts]
                for treadle in self.treadles
            ],
            'date': self.date,
            'title': self.title,
            'author': self.author,
            'address': self.address,
            'email': self.email,
            'telephone': self.telephone,
            'fax': self.fax,
            'notes': self.notes,
        })
    def copy(self):
        """
        Make a complete copy of this draft.

        Returns a deep copy: shafts, treadles and threads are all
        duplicated, so mutating the copy never affects the original.
        """
        return deepcopy(self)
def add_warp_thread(self, color=None, index=None, shaft=0):
"""
Add a warp thread to this draft.
"""
if not isinstance(shaft, Shaft):
shaft = self.shafts[shaft]
thread = WarpThread(
color=color,
shaft=shaft,
)
if index is None:
self.warp.append(thread)
else:
self.warp.insert(index, thread)
def add_weft_thread(self, color=None, index=None,
shafts=None, treadles=None):
"""
Add a weft thread to this draft.
"""
shafts = shafts or set()
shaft_objs = set()
for shaft in shafts:
if not isinstance(shaft, Shaft):
shaft = self.shafts[shaft]
shaft_objs.add(shaft)
treadles = treadles or set()
treadle_objs = set()
for treadle in treadles:
if not isinstance(treadle, Treadle):
treadle = self.treadles[treadle]
treadle_objs.add(treadle)
thread = WeftThread(
color=color,
shafts=shaft_objs,
treadles=treadle_objs,
)
if index is None:
self.weft.append(thread)
else:
self.weft.insert(index, thread)
def compute_drawdown_at(self, position):
"""
Return the thread that is on top (visible) at the specified
zero-indexed position.
"""
x, y = position
warp_thread = self.warp[x]
weft_thread = self.weft[y]
connected_shafts = weft_thread.connected_shafts
warp_at_rest = warp_thread.shaft not in connected_shafts
if warp_at_rest ^ self.rising_shed:
return warp_thread
else:
return weft_thread
def compute_drawdown(self):
    """
    Compute a 2D array (indexed [warp][weft]) of the thread visible at
    each position.
    """
    cols = range(len(self.warp))
    rows = range(len(self.weft))
    return [
        [self.compute_drawdown_at((x, y)) for y in rows]
        for x in cols
    ]
def compute_floats(self):
    """
    Return an iterator over every float, yielding a tuple for each one::
        (start, end, visible, length, thread)

    ``start`` and ``end`` are (warp_index, weft_index) positions; ``visible``
    is True when the float's thread is on top of the drawdown; ``length`` is
    the distance (in thread positions) between the float's endpoints.
    Warp floats are yielded first (one pass per warp thread, scanning along
    the weft), then weft floats (one pass per weft thread, scanning along
    the warp).

    FIXME: This ignores the back side of the fabric. Should it?
    """
    num_warp_threads = len(self.warp)
    num_weft_threads = len(self.weft)
    drawdown = self.compute_drawdown()
    # Iterate over each warp thread, then each weft thread
    # For each thread, find the position of each change in state
    for x, thread in enumerate(self.warp):
        # Track whether this thread is visible at the current run's start.
        this_vis_state = (thread == drawdown[x][0])
        last = this_start = (x, 0)
        for y in range(1, num_weft_threads):
            check_vis_state = (thread == drawdown[x][y])
            if check_vis_state != this_vis_state:
                # Visibility flipped: the run that just ended is a float.
                length = last[1] - this_start[1]
                yield this_start, last, this_vis_state, length, thread
                this_vis_state = check_vis_state
                this_start = x, y
            last = x, y
        # Emit the trailing run that reaches the fabric edge.
        length = last[1] - this_start[1]
        yield this_start, last, this_vis_state, length, thread
    for y, thread in enumerate(self.weft):
        this_vis_state = (thread == drawdown[0][y])
        last = this_start = (0, y)
        for x in range(1, num_warp_threads):
            check_vis_state = (thread == drawdown[x][y])
            if check_vis_state != this_vis_state:
                length = last[0] - this_start[0]
                yield this_start, last, this_vis_state, length, thread
                this_vis_state = check_vis_state
                this_start = x, y
            last = x, y
        length = last[0] - this_start[0]
        yield this_start, last, this_vis_state, length, thread
def compute_longest_floats(self):
    """
    Return a (longest_warp_float, longest_weft_float) tuple.

    FIXME This might be producing incorrect results.
    """
    all_floats = list(self.compute_floats())
    longest_warp = max(f[3] for f in all_floats if isinstance(f[4], WarpThread))
    longest_weft = max(f[3] for f in all_floats if isinstance(f[4], WeftThread))
    return (longest_warp, longest_weft)
def reduce_shafts(self):
    """
    Not yet implemented: minimize the shaft count so a complex draft can be
    woven on a loom with fewer shafts, at the possible cost of a more
    complex / less periodic threading.
    """
    raise NotImplementedError
def reduce_treadles(self):
    """
    Not yet implemented: minimize the total treadle count so a complex draft
    can be woven on a loom with fewer treadles, possibly at the cost of more
    treadles being active per pick.

    Cannot be called on a liftplan draft.
    """
    raise NotImplementedError
def reduce_active_treadles(self):
"""
Optimize to use the fewest number of active treadles on any given pick,
because not every weaver is an octopus. Note that this may mean using
more total treadles.
Cannot be called on a liftplan draft.
"""
if self.liftplan:
raise ValueError("can't reduce treadles on a liftplan draft")
if True or max(len(thread.treadles) for thread in self.weft) > 1:
used_shaft_combos = defaultdict(list)
for thread in self.weft:
used_shaft_combos[frozenset(thread.connected_shafts)].\
append(thread)
self.treadles = []
for shafts, threads in used_shaft_combos.items():
treadle = Treadle(shafts=set(shafts))
self.treadles.append(treadle)
for thread in threads:
thread.treadles = set([treadle])
def sort_threading(self):
    """
    Not yet implemented: reorder shaft assignments so the threading is as
    sequential as possible.

    For a liftplan draft this would change the threading and liftplan; for
    a treadled draft, the threading and tieup (but not the treadling).
    """
    raise NotImplementedError
def sort_treadles(self):
    """
    Not yet implemented: reorder treadle assignments in the tieup so the
    treadling is as sequential as possible.

    Would change the tieup and treadling but not the threading. If sorting
    both, call ``.sort_threading()`` before ``.sort_treadles()``.

    Cannot be called on a liftplan draft.
    """
    raise NotImplementedError
def invert_shed(self):
    """
    Toggle between rising-shed and sinking-shed conventions while preserving
    the drawdown: every weft thread's and treadle's shaft set is complemented
    against the full shaft set. To flip only the convention flag without
    updating threading/tie-up, assign ``.rising_shed`` directly instead.
    """
    self.rising_shed = not self.rising_shed
    all_shafts = self.shafts
    for weft_thread in self.weft:
        weft_thread.shafts = all_shafts - weft_thread.shafts
    for treadle in self.treadles:
        treadle.shafts = all_shafts - treadle.shafts
def rotate(self):
    """
    Not yet implemented: rotate the draft so the weft becomes the warp and
    vice versa.
    """
    raise NotImplementedError
def flip_weftwise(self):
    """
    Mirror the draft along the weft axis: looking at the front of the loom,
    left and right sides of the fabric swap. Reverses the warp in place.
    """
    self.warp.reverse()
def flip_warpwise(self):
    """
    Mirror the draft along the warp axis: looking at the front of the loom,
    near and far ends of the fabric swap. Reverses the weft in place.
    """
    self.weft.reverse()
def selvedges_continuous(self):
    """
    Return True when both selvedge threads are "continuous" (picked up on
    every pick); checks the low side first, then the high side.
    """
    return all(self.selvedge_continuous(side) for side in (False, True))
def selvedge_continuous(self, low):
    """
    Check whether one selvedge is continuous: ``low`` selects the
    lowest-numbered warp thread, otherwise the highest-numbered one.

    A selvedge is continuous when the edge thread's pickup state alternates
    on every examined pair of picks. Which pairs are examined depends on
    where the draft starts:

    - Low selvedge, draft starting at the lowest thread: pairs (1,2), (3,4)…
      otherwise pairs (0,1), (2,3)…
    - High selvedge, draft starting at the highest thread: pairs (0,1),
      (2,3)…
    """
    offset = 0 if low ^ self.start_at_lowest_thread else 1
    thread = self.warp[0] if low else self.warp[-1]
    for ii in range(offset, len(self.weft) - 1, 2):
        picked_a = thread.shaft in self.weft[ii].connected_shafts
        picked_b = thread.shaft in self.weft[ii + 1].connected_shafts
        # Equal pickup state on a pair means the edge thread floats there.
        if picked_a == picked_b:
            return False
    return True
def make_selvedges_continuous(self, add_new_shafts=False):
    """
    Make the selvedge threads "continuous": threaded and treadled such that
    they are picked up on every pick. Tries re-assigning each edge thread to
    one of the existing shafts; ``add_new_shafts`` is reserved for adding
    shafts when that fails (not yet implemented).

    :raises DraftError: if no existing shaft makes the selvedge continuous
        and ``add_new_shafts`` is False.
    :raises NotImplementedError: if ``add_new_shafts`` is True and needed.

    FIXME This method works, but it does not necessarily produce the
    subjectively "best" solution in terms of aesthetics and structure. For
    example, it may result in longer floats than necessary.
    """
    for low_thread in (False, True):
        warp_thread = self.warp[0] if low_thread else self.warp[-1]
        if self.selvedge_continuous(low_thread):
            continue
        for shaft in self.shafts:
            # Mutates the edge thread; a later success leaves it on the
            # first shaft that worked.
            warp_thread.shaft = shaft
            if self.selvedge_continuous(low_thread):
                break
        else:
            if add_new_shafts:
                raise NotImplementedError
            raise DraftError("cannot make continuous selvedges")
def compute_weft_crossings(self):
    """
    Not yet implemented: per weft row, count the total thread crossings.
    Useful for determining sett.
    """
    raise NotImplementedError
def compute_warp_crossings(self):
    """
    Not yet implemented: per warp row, count the total thread crossings.
    """
    raise NotImplementedError
def repeat(self, n):
    """
    Append ``n`` additional copies of the current warp and weft to this
    draft, snapshotting the threads before any are added.
    """
    base_warp = tuple(self.warp)
    base_weft = tuple(self.weft)
    for _ in range(n):
        for thread in base_warp:
            self.add_warp_thread(
                color=thread.color,
                shaft=thread.shaft,
            )
        for thread in base_weft:
            self.add_weft_thread(
                color=thread.color,
                treadles=thread.treadles,
                shafts=thread.shafts,
            )
def advance(self):
    """
    Given a base draft, make it 'advance'. Essentially:
        1. Repeat the draft N times, where N is the number of shafts, in
           both the warp and weft directions.
        2. On each successive repeat, offset the threading by 1 additional
           shaft and the treadling by one additional treadle.

    Shaft/treadle offsets wrap modulo the shaft/treadle counts. The leftover
    debug ``print()`` calls from the original have been removed; behavior is
    otherwise unchanged.
    """
    initial_warp = list(self.warp)
    initial_weft = list(self.weft)
    num_shafts = len(self.shafts)
    num_treadles = len(self.treadles)
    for ii in range(1, num_shafts):
        # Repeat ``ii``: every thread is shifted ``ii`` shafts/treadles.
        for thread in initial_warp:
            initial_shaft = self.shafts.index(thread.shaft)
            new_shaft = (initial_shaft + ii) % num_shafts
            self.add_warp_thread(
                color=thread.color,
                shaft=new_shaft,
            )
        for thread in initial_weft:
            initial_treadles = [self.treadles.index(treadle)
                                for treadle in thread.treadles]
            new_treadles = [(treadle + ii) % num_treadles
                            for treadle in initial_treadles]
            initial_shafts = [self.shafts.index(shaft)
                              for shaft in thread.shafts]
            new_shafts = [(shaft + ii) % num_shafts
                          for shaft in initial_shafts]
            self.add_weft_thread(
                color=thread.color,
                treadles=new_treadles,
                shafts=new_shafts,
            )
def all_threads_attached(self):
    """
    Not yet implemented: check that every weft and warp thread is "attached"
    to the fabric rather than falling off.
    """
    raise NotImplementedError
| mit |
googleapis/googleapis-gen | google/cloud/speech/v1/speech-v1-py/tests/unit/gapic/speech_v1/test_speech.py | 1 | 55209 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.speech_v1.services.speech import SpeechAsyncClient
from google.cloud.speech_v1.services.speech import SpeechClient
from google.cloud.speech_v1.services.speech import transports
from google.cloud.speech_v1.services.speech.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.speech_v1.types import cloud_speech
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.rpc import status_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Marker: skip the test unless the installed google-auth is older than 1.25.0.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
# Marker: skip the test unless the installed google-auth is 1.25.0 or newer.
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Stub mTLS client-certificate provider returning a static (cert, key) pair."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# Rewriting the default endpoint lets the tests observe which endpoint the
# client actually selected.
def modify_default_endpoint(client):
    """Return a sentinel endpoint when the client's default is localhost-based,
    otherwise the client's own DEFAULT_ENDPOINT."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mtls
    twins, passes mtls hosts and non-Google hosts through, and maps None to
    None."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for given, expected in cases:
        if expected is None:
            assert SpeechClient._get_default_mtls_endpoint(given) is None
        else:
            assert SpeechClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize("client_class", [
    SpeechClient,
    SpeechAsyncClient,
])
def test_speech_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport carries the
    parsed credentials and default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'speech.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
    SpeechClient,
    SpeechAsyncClient,
])
def test_speech_client_service_account_always_use_jwt(client_class):
    """Constructing a client directly must not opt credentials into
    always-use-JWT access."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        client = client_class(credentials=creds)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.SpeechGrpcTransport, "grpc"),
    (transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_speech_client_service_account_always_use_jwt_true(transport_class, transport_name):
    """Passing always_use_jwt_access=True to a transport must enable JWT
    access on the credentials exactly once."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize("client_class", [
    SpeechClient,
    SpeechAsyncClient,
])
def test_speech_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json alias
    build clients carrying the loaded credentials and default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'speech.googleapis.com:443'
def test_speech_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and
    when requested by the "grpc" name."""
    default_transport = SpeechClient.get_transport_class()
    assert default_transport in [
        transports.SpeechGrpcTransport,
    ]
    named_transport = SpeechClient.get_transport_class("grpc")
    assert named_transport == transports.SpeechGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SpeechClient, transports.SpeechGrpcTransport, "grpc"),
    (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechClient))
@mock.patch.object(SpeechAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechAsyncClient))
def test_speech_client_client_options(client_class, transport_class, transport_name):
    """Exercises client construction across client_options and the
    GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables, asserting which transport arguments result."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(SpeechClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(SpeechClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (SpeechClient, transports.SpeechGrpcTransport, "grpc", "true"),
    (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (SpeechClient, transports.SpeechGrpcTransport, "grpc", "false"),
    (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechClient))
@mock.patch.object(SpeechAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_speech_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", verify the endpoint/cert
    autoswitch for: an explicit client_cert_source, an ADC-provided cert,
    and no cert at all, across both client-certificate env settings."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SpeechClient, transports.SpeechGrpcTransport, "grpc"),
    (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_speech_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes passed via client_options must be forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SpeechClient, transports.SpeechGrpcTransport, "grpc"),
    (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_speech_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """A credentials_file in client_options must be forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_speech_client_client_options_from_dict():
    """client_options given as a plain dict must configure the transport the
    same way a ClientOptions object would."""
    with mock.patch('google.cloud.speech_v1.services.speech.transports.SpeechGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = SpeechClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_recognize(transport: str = 'grpc', request_type=cloud_speech.RecognizeRequest):
    """recognize() sends the request over the gRPC stub exactly once and
    returns a RecognizeResponse."""
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_speech.RecognizeResponse(
        )
        response = client.recognize(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_speech.RecognizeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_speech.RecognizeResponse)
def test_recognize_from_dict():
    """recognize() must also accept the request as a plain dict."""
    test_recognize(request_type=dict)
def test_recognize_empty_call():
    """Calling recognize() with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.recognize),
            '__call__') as call:
        client.recognize()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_speech.RecognizeRequest()
@pytest.mark.asyncio
async def test_recognize_async(transport: str = 'grpc_asyncio', request_type=cloud_speech.RecognizeRequest):
    """Async recognize() sends the request over the async gRPC stub and
    returns a RecognizeResponse."""
    client = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cloud_speech.RecognizeResponse(
        ))
        response = await client.recognize(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_speech.RecognizeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_speech.RecognizeResponse)
@pytest.mark.asyncio
async def test_recognize_async_from_dict():
    """Async recognize() must also accept the request as a plain dict."""
    await test_recognize_async(request_type=dict)
def test_recognize_flattened():
    """Flattened config/audio kwargs must be packed into the RecognizeRequest."""
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_speech.RecognizeResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.recognize(
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].config == cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16)
        assert args[0].audio == cloud_speech.RecognitionAudio(content=b'content_blob')
def test_recognize_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.recognize(
            cloud_speech.RecognizeRequest(),
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
@pytest.mark.asyncio
async def test_recognize_flattened_async():
    """Flattened config/audio kwargs to the async recognize() must be packed
    into the RecognizeRequest."""
    client = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call. Only the
        # awaitable fake is assigned; the original code also assigned a plain
        # RecognizeResponse on the previous line, which was immediately
        # overwritten and therefore dead.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_speech.RecognizeResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.recognize(
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].config == cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16)
        assert args[0].audio == cloud_speech.RecognitionAudio(content=b'content_blob')
@pytest.mark.asyncio
async def test_recognize_flattened_error_async():
    """Async: passing both a request object and flattened fields must raise
    ValueError."""
    client = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.recognize(
            cloud_speech.RecognizeRequest(),
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
def test_long_running_recognize(transport: str = 'grpc', request_type=cloud_speech.LongRunningRecognizeRequest):
    """long_running_recognize() sends the request once and wraps the returned
    Operation in a future."""
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.long_running_recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.long_running_recognize(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_speech.LongRunningRecognizeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_long_running_recognize_from_dict():
    """long_running_recognize() must also accept the request as a plain dict."""
    test_long_running_recognize(request_type=dict)
def test_long_running_recognize_empty_call():
    """Calling long_running_recognize() with no arguments still issues a
    default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.long_running_recognize),
            '__call__') as call:
        client.long_running_recognize()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_speech.LongRunningRecognizeRequest()
@pytest.mark.asyncio
async def test_long_running_recognize_async(transport: str = 'grpc_asyncio', request_type=cloud_speech.LongRunningRecognizeRequest):
    """Async long_running_recognize() sends the request and wraps the returned
    Operation in a future."""
    client = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.long_running_recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.long_running_recognize(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_speech.LongRunningRecognizeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_long_running_recognize_async_from_dict():
    """The async variant must also accept plain-dict requests."""
    await test_long_running_recognize_async(request_type=dict)
def test_long_running_recognize_flattened():
    """Flattened keyword args are packed into the outgoing request proto."""
    speech_client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
            type(speech_client.transport.long_running_recognize),
            '__call__') as rpc:
        rpc.return_value = operations_pb2.Operation(name='operations/op')
        # Provide every flattened field as a truthy keyword argument.
        speech_client.long_running_recognize(
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
        # The generated request must mirror the keyword arguments.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].config == cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16)
        assert call_args[0].audio == cloud_speech.RecognitionAudio(content=b'content_blob')
def test_long_running_recognize_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    speech_client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        speech_client.long_running_recognize(
            cloud_speech.LongRunningRecognizeRequest(),
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
@pytest.mark.asyncio
async def test_long_running_recognize_flattened_async():
    """Async: flattened keyword args are packed into the request proto.

    Fix: the original assigned ``call.return_value`` twice in a row; the
    first assignment (a bare ``operations_pb2.Operation``) was dead code,
    immediately overwritten by the ``FakeUnaryUnaryCall`` wrapper. The
    dead assignment has been removed.
    """
    client = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.long_running_recognize),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.long_running_recognize(
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].config == cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16)
        assert args[0].audio == cloud_speech.RecognitionAudio(content=b'content_blob')
@pytest.mark.asyncio
async def test_long_running_recognize_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    async_client = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await async_client.long_running_recognize(
            cloud_speech.LongRunningRecognizeRequest(),
            config=cloud_speech.RecognitionConfig(encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16),
            audio=cloud_speech.RecognitionAudio(content=b'content_blob'),
        )
def test_streaming_recognize(transport: str = 'grpc', request_type=cloud_speech.StreamingRecognizeRequest):
    """Streaming recognize: request iterator in, response iterator out."""
    speech_client = SpeechClient(
        transport=transport,
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = request_type()  # empty proto; the API itself is mocked
    requests = [request]
    with mock.patch.object(
            type(speech_client.transport.streaming_recognize),
            '__call__') as rpc:
        # The stub returns an iterator of streamed responses.
        rpc.return_value = iter([cloud_speech.StreamingRecognizeResponse()])
        response = speech_client.streaming_recognize(iter(requests))
        # One stub invocation whose request iterator yields our request.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert next(call_args[0]) == request
    # Every streamed message must be a StreamingRecognizeResponse.
    for message in response:
        assert isinstance(message, cloud_speech.StreamingRecognizeResponse)
def test_streaming_recognize_from_dict():
    """A plain dict must be accepted in place of a request proto."""
    test_streaming_recognize(request_type=dict)
@pytest.mark.asyncio
async def test_streaming_recognize_async(transport: str = 'grpc_asyncio', request_type=cloud_speech.StreamingRecognizeRequest):
    """Async streaming recognize: request iterator in, async stream out."""
    async_client = SpeechAsyncClient(
        transport=transport,
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = request_type()  # empty proto; the API itself is mocked
    requests = [request]
    with mock.patch.object(
            type(async_client.transport.streaming_recognize),
            '__call__') as rpc:
        # Fake a stream-stream call whose read() yields one response.
        rpc.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
        rpc.return_value.read = mock.AsyncMock(side_effect=[cloud_speech.StreamingRecognizeResponse()])
        response = await async_client.streaming_recognize(iter(requests))
        # Stub must have been hit; its request iterator yields our request.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert next(call_args[0]) == request
    # Reading from the stream yields StreamingRecognizeResponse messages.
    message = await response.read()
    assert isinstance(message, cloud_speech.StreamingRecognizeResponse)
@pytest.mark.asyncio
async def test_streaming_recognize_async_from_dict():
    """The async variant must also accept plain-dict requests."""
    await test_streaming_recognize_async(request_type=dict)
def test_credentials_transport_error():
    """A transport instance may not be combined with credentials or options."""
    # credentials + transport instance -> ValueError
    transport = transports.SpeechGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        SpeechClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # credentials_file + transport instance -> ValueError
    transport = transports.SpeechGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        SpeechClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # scopes + transport instance -> ValueError
    transport = transports.SpeechGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        SpeechClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A caller-supplied transport instance is adopted verbatim."""
    grpc_transport = transports.SpeechGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert SpeechClient(transport=grpc_transport).transport is grpc_transport
def test_transport_get_channel():
    """Both sync and asyncio gRPC transports expose a usable channel."""
    for transport_cls in (transports.SpeechGrpcTransport,
                          transports.SpeechGrpcAsyncIOTransport):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize("transport_class", [
    transports.SpeechGrpcTransport,
    transports.SpeechGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    """Without explicit credentials the transport falls back to ADC."""
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """gRPC is the default transport when none is requested."""
    default_client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(default_client.transport, transports.SpeechGrpcTransport)
def test_speech_base_transport_error():
    """Passing credentials and credentials_file together is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.SpeechTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_speech_base_transport():
    """Every base-transport RPC method and the LRO client are abstract."""
    # Instantiate the base transport with __init__ stubbed out.
    with mock.patch('google.cloud.speech_v1.services.speech.transports.SpeechTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.SpeechTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Abstract RPC methods must refuse to run on the base class.
    for method_name in (
        'recognize',
        'long_running_recognize',
        'streaming_recognize',
    ):
        with pytest.raises(NotImplementedError):
            getattr(transport, method_name)(request=object())
    # The operations_client property is abstract as well.
    with pytest.raises(NotImplementedError):
        transport.operations_client
@requires_google_auth_gte_1_25_0
def test_speech_base_transport_with_credentials_file():
    """A credentials file is loaded with the default cloud-platform scope."""
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.speech_v1.services.speech.transports.SpeechTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.SpeechTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_speech_base_transport_with_credentials_file_old_google_auth():
    """Pre-1.25 google-auth: the default scope is passed as plain ``scopes``."""
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.speech_v1.services.speech.transports.SpeechTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.SpeechTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id="octopus",
        )
def test_speech_base_transport_with_adc():
    """With neither credentials nor credentials_file, the base transport uses ADC."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.speech_v1.services.speech.transports.SpeechTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.SpeechTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_speech_auth_adc():
    """Client construction with no credentials goes through ADC."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SpeechClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_speech_auth_adc_old_google_auth():
    """Pre-1.25 google-auth: default scopes are passed as plain ``scopes``."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SpeechClient()
        adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SpeechGrpcTransport,
        transports.SpeechGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_speech_transport_auth_adc(transport_class):
    """Transports consult ADC, forwarding caller scopes plus the defaults."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SpeechGrpcTransport,
        transports.SpeechGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_speech_transport_auth_adc_old_google_auth(transport_class):
    """Pre-1.25 google-auth: transports pass only the default scope to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SpeechGrpcTransport, grpc_helpers),
        (transports.SpeechGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
def test_speech_transport_create_channel(transport_class, grpc_helpers):
    """The transport builds its channel from ADC creds and merged scopes."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        anon_creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (anon_creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"],
        )
        # The channel must be created against the default host with the
        # caller's scopes and the service's default scope both forwarded.
        create_channel.assert_called_with(
            "speech.googleapis.com:443",
            credentials=anon_creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
            scopes=["1", "2"],
            default_host="speech.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport])
def test_speech_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """mTLS: explicit ssl creds win; otherwise the cert-source callback is used."""
    anon_cred = ga_credentials.AnonymousCredentials()
    # Case 1: ssl_channel_credentials is passed straight to create_channel.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=anon_cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=anon_cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Case 2: without ssl creds, client_cert_source_for_mtls supplies the
    # cert/key pair used to build the channel credentials.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=anon_cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key,
            )
def test_speech_host_no_port():
    """A portless api_endpoint gets the default :443 appended."""
    opts = client_options.ClientOptions(api_endpoint='speech.googleapis.com')
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == 'speech.googleapis.com:443'
def test_speech_host_with_port():
    """An explicit port in api_endpoint is preserved verbatim."""
    opts = client_options.ClientOptions(api_endpoint='speech.googleapis.com:8000')
    client = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == 'speech.googleapis.com:8000'
def test_speech_grpc_transport_channel():
    """An injected channel is adopted as-is by the sync gRPC transport.

    Fix: the final comparison used ``== None`` (PEP 8 / pycodestyle E711);
    singleton comparison must use ``is None``, which also cannot be fooled
    by objects overriding ``__eq__``.
    """
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SpeechGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_speech_grpc_asyncio_transport_channel():
    """An injected channel is adopted as-is by the asyncio gRPC transport.

    Fix: the final comparison used ``== None`` (PEP 8 / pycodestyle E711);
    singleton comparison must use ``is None``.
    """
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SpeechGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport])
def test_speech_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated mTLS path: api_mtls_endpoint + client_cert_source still work.

    Verifies that the cert/key pair from the callback is turned into ssl
    channel credentials, a DeprecationWarning is emitted, ADC is consulted,
    and the channel is created against the mTLS endpoint.
    """
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated kwargs must still work, but warn.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The callback's cert/key pair becomes the ssl credentials
            # (client_cert_source_callback returns these fixed bytes).
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel targets the mTLS endpoint, not the plain host.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport])
def test_speech_transport_channel_mtls_with_adc(
    transport_class
):
    """Deprecated mTLS path: api_mtls_endpoint with no cert source falls back
    to ADC-provided SslCredentials (mocked here) for the channel."""
    mock_ssl_cred = mock.Mock()
    # Patch SslCredentials so its ssl_credentials property yields our mock.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Deprecated kwargs must still work, but warn.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The channel targets the mTLS endpoint with the ADC ssl creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_speech_grpc_lro_client():
    """The sync transport lazily builds and caches an OperationsClient."""
    transport = SpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    ).transport
    assert isinstance(transport.operations_client, operations_v1.OperationsClient)
    # The property must cache: repeated access yields the same object.
    assert transport.operations_client is transport.operations_client
def test_speech_grpc_lro_async_client():
    """The asyncio transport lazily builds and caches an OperationsAsyncClient."""
    transport = SpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc_asyncio',
    ).transport
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient)
    # The property must cache: repeated access yields the same object.
    assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
    """The billing-account path template renders billingAccounts/<id>."""
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
    assert SpeechClient.common_billing_account_path(billing_account) == expected
def test_parse_common_billing_account_path():
    """parse_ must invert common_billing_account_path."""
    expected = {
        "billing_account": "clam",
    }
    rendered = SpeechClient.common_billing_account_path(**expected)
    assert SpeechClient.parse_common_billing_account_path(rendered) == expected
def test_common_folder_path():
    """The folder path template renders folders/<id>."""
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder, )
    assert SpeechClient.common_folder_path(folder) == expected
def test_parse_common_folder_path():
    """parse_ must invert common_folder_path."""
    expected = {
        "folder": "octopus",
    }
    rendered = SpeechClient.common_folder_path(**expected)
    assert SpeechClient.parse_common_folder_path(rendered) == expected
def test_common_organization_path():
    """The organization path template renders organizations/<id>."""
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization, )
    assert SpeechClient.common_organization_path(organization) == expected
def test_parse_common_organization_path():
    """parse_ must invert common_organization_path."""
    expected = {
        "organization": "nudibranch",
    }
    rendered = SpeechClient.common_organization_path(**expected)
    assert SpeechClient.parse_common_organization_path(rendered) == expected
def test_common_project_path():
    """The project path template renders projects/<id>."""
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project, )
    assert SpeechClient.common_project_path(project) == expected
def test_parse_common_project_path():
    """parse_ must invert common_project_path."""
    expected = {
        "project": "mussel",
    }
    rendered = SpeechClient.common_project_path(**expected)
    assert SpeechClient.parse_common_project_path(rendered) == expected
def test_common_location_path():
    """The location path template renders projects/<p>/locations/<l>."""
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
    assert SpeechClient.common_location_path(project, location) == expected
def test_parse_common_location_path():
    """parse_ must invert common_location_path."""
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    rendered = SpeechClient.common_location_path(**expected)
    assert SpeechClient.parse_common_location_path(rendered) == expected
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info reaches _prep_wrapped_messages via both entry points."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Construction through the client wrapper.
    with mock.patch.object(transports.SpeechTransport, '_prep_wrapped_messages') as prep:
        SpeechClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Construction through the transport class directly.
    with mock.patch.object(transports.SpeechTransport, '_prep_wrapped_messages') as prep:
        SpeechClient.get_transport_class()(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
# license: apache-2.0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Romeo Theriault <romeot () hawaii.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# see examples/playbooks/uri.yml
# Release-process metadata consumed by Ansible tooling (ansible-doc /
# validate-modules); not user-facing configuration.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
required: true
default: null
dest:
description:
- path of where to download the file to (if desired). If I(dest) is a
directory, the basename of the file on the remote server will be used.
required: false
default: null
user:
description:
- username for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
password:
description:
- password for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
to 'json' it will take an already formatted JSON string or convert a data structure
into JSON.
required: false
default: null
body_format:
description:
- The serialization format of the body. When set to json, encodes the
body argument, if needed, and automatically sets the Content-Type header accordingly.
As of C(2.3) it is possible to override the `Content-Type` header, when
set to json via the I(headers) option.
required: false
choices: [ "raw", "json" ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response. It MUST be uppercase.
required: false
choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ]
default: "GET"
return_content:
description:
- Whether or not to return the body of the request as a "content" key in
the dictionary result. If the reported Content-type is
"application/json", then the JSON is additionally loaded into a key
called C(json) in the dictionary results.
required: false
choices: [ "yes", "no" ]
default: "no"
force_basic_auth:
description:
- The library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
required: false
choices: [ "all", "safe", "none" ]
default: "safe"
creates:
description:
- a filename, when it already exists, this step will not be run.
required: false
removes:
description:
- a filename, when it does not exist, this step will not be run.
required: false
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the
request. Can also be comma separated list of status codes.
required: false
default: 200
timeout:
description:
- The socket level timeout in seconds
required: false
default: 30
HEADER_:
description:
- Any parameter starting with "HEADER_" is a sent with your request as a header.
For example, HEADER_Content-Type="application/json" would send the header
"Content-Type" along with your request with a value of "application/json".
This option is deprecated as of C(2.1) and may be removed in a future
release. Use I(headers) instead.
required: false
default: null
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash. As
of C(2.3) supplying C(Content-Type) here will override the header
generated by supplying C(json) for I(body_format).
required: false
default: null
version_added: '2.1'
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
validate_certs:
description:
      - If C(no), SSL certificates will not be validated. This should only be
        set to C(no) when used on personally controlled sites using self-signed
        certificates. Prior to 1.9.2 the code defaulted to C(no).
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.9.2'
client_cert:
required: false
default: null
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
the key is included, I(client_key) is not required
version_added: 2.4
client_key:
required: false
default: null
description:
- PEM formatted file that contains your private key to be used for SSL
client authentication. If I(client_cert) contains both the certificate
and key, this option is not required.
version_added: 2.4
notes:
- The dependency on httplib2 was removed in Ansible 2.1
author: "Romeo Theriault (@romeotheriault)"
'''
EXAMPLES = '''
- name: Check that you can connect (GET) to a page and it returns a status 200
uri:
url: http://www.example.com
# Check that a page returns a status 200 and fail if the word AWESOME is not
# in the page contents.
- uri:
url: http://www.example.com
return_content: yes
register: webpage
- name: Fail if AWESOME is not in the page content
  fail:
  when: "'AWESOME' not in webpage.content"
- name: Create a JIRA issue
uri:
url: https://your.jira.example.com/rest/api/2/issue/
method: POST
user: your_username
password: your_pass
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
# Login to a form based webpage, then use the returned cookie to
# access the app in later tasks
- uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body: "name=your_username&password=your_password&enter=Sign%20in"
status_code: 302
headers:
Content-Type: "application/x-www-form-urlencoded"
register: login
- uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
headers:
Cookie: "{{login.set_cookie}}"
- name: Queue build of a project in Jenkins
uri:
url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}"
method: GET
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
force_basic_auth: yes
status_code: 201
'''
import cgi
import datetime
import os
import shutil
import tempfile
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.six as six
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url, url_argument_spec
def write_file(module, url, dest, content):
    """Write *content* to *dest* through a temporary file.

    The payload is written to a temp file first and only copied over
    *dest* when the SHA1 checksums differ, so an up-to-date destination
    is left untouched.  Every error path reports through
    module.fail_json(), which terminates the module run.

    :param module: AnsibleModule instance (provides sha1() and fail_json()).
    :param url: source URL; currently unused here, kept for interface
        compatibility with callers.
    :param dest: destination path on disk.
    :param content: raw response body (bytes) to persist.
    """
    # mkstemp() returns an already-open OS-level descriptor; wrap it with
    # fdopen() instead of re-opening the path, which leaked that descriptor.
    fd, tmpsrc = tempfile.mkstemp()
    f = os.fdopen(fd, 'wb')
    try:
        f.write(content)
    except Exception:
        err = get_exception()
        # Close and clean up before bailing out; fail_json() does not return.
        f.close()
        os.remove(tmpsrc)
        module.fail_json(msg="failed to create temporary content file: %s" % str(err))
    f.close()

    # mkstemp() guarantees the file exists, so only readability is checked.
    # (The original "if not exists: remove" branch could never succeed.)
    if not os.access(tmpsrc, os.R_OK):
        os.remove(tmpsrc)
        module.fail_json(msg="Source %s not readable" % (tmpsrc))
    checksum_src = module.sha1(tmpsrc)
    checksum_dest = None

    # check if there is no dest file
    if os.path.exists(dest):
        # raise an error if copy has no permission on dest
        if not os.access(dest, os.W_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination %s not writable" % (dest))
        if not os.access(dest, os.R_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination %s not readable" % (dest))
        checksum_dest = module.sha1(dest)
    else:
        if not os.access(os.path.dirname(dest), os.W_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))

    # Copy only when the content actually changed.
    if checksum_src != checksum_dest:
        try:
            shutil.copyfile(tmpsrc, dest)
        except Exception:
            err = get_exception()
            os.remove(tmpsrc)
            module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
    os.remove(tmpsrc)
def url_filename(url):
    """Return the final path component of *url*, or 'index.html' when the
    URL path carries no file name."""
    path = six.moves.urllib.parse.urlsplit(url)[2]
    return os.path.basename(path) or 'index.html'
def absolute_location(url, location):
    """Attempts to create an absolute URL based on initial URL, and
    next URL, specifically in the case of a ``Location`` header.

    Three cases: *location* is already absolute, host-relative
    (leading '/'), or path-relative.
    """
    if '://' in location:
        # Already absolute.
        return location
    elif location.startswith('/'):
        # Host-relative: keep scheme+netloc, drop the original path.
        parts = six.moves.urllib.parse.urlsplit(url)
        base = url.replace(parts[2], '')
        return '%s%s' % (base, location)
    else:
        # Path-relative: resolve against the "directory" of the original
        # URL.  (The original code carried an extra unreachable `else`
        # after `elif not location.startswith('/')` — removed.)
        base = os.path.dirname(url)
        return '%s/%s' % (base, location)
def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
    """Issue the HTTP(S) request and return ``(result_dict, content, dest)``.

    When *dest* is an existing directory, a preliminary non-following
    request is made to detect a redirect so the final filename can be
    derived from the redirected URL; an If-Modified-Since header is set
    when the destination file already exists.
    """
    # is dest is set and is a directory, let's check if we get redirected and
    # set the filename from that url
    redirected = False
    redir_info = {}
    r = {}
    if dest is not None:
        # Stash follow_redirects, in this block we don't want to follow
        # we'll reset back to the supplied value soon
        follow_redirects = module.params['follow_redirects']
        module.params['follow_redirects'] = False

        dest = os.path.expanduser(dest)
        if os.path.isdir(dest):
            # first check if we are redirected to a file download
            _, redir_info = fetch_url(module, url, data=body,
                                      headers=headers,
                                      method=method,
                                      timeout=socket_timeout)
            # if we are redirected, update the url with the location header,
            # and update dest with the new url filename
            if redir_info['status'] in (301, 302, 303, 307):
                url = redir_info['location']
                redirected = True
            dest = os.path.join(dest, url_filename(url))

        # if destination file already exist, only download if file newer
        if os.path.exists(dest):
            t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
            tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000')
            headers['If-Modified-Since'] = tstamp

        # Reset follow_redirects back to the stashed value
        module.params['follow_redirects'] = follow_redirects

    resp, info = fetch_url(module, url, data=body, headers=headers,
                           method=method, timeout=socket_timeout)

    try:
        content = resp.read()
    except AttributeError:
        # there was no content, but the error read()
        # may have been stored in the info as 'body'
        content = info.pop('body', '')

    # 'redirected' is also true when fetch_url itself followed a redirect
    # (final URL differs from the requested one).
    r['redirected'] = redirected or info['url'] != url
    r.update(redir_info)
    r.update(info)

    return r, content, dest
def main():
    """Module entry point: build the argument spec, perform the request,
    optionally persist the body to disk, and exit with the response
    (headers flattened into result keys, body JSON-decoded when possible).
    """
    argument_spec = url_argument_spec()
    argument_spec.update(dict(
        dest = dict(required=False, default=None, type='path'),
        url_username = dict(required=False, default=None, aliases=['user']),
        url_password = dict(required=False, default=None, aliases=['password'], no_log=True),
        body = dict(required=False, default=None, type='raw'),
        body_format = dict(required=False, default='raw', choices=['raw', 'json']),
        method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']),
        return_content = dict(required=False, default='no', type='bool'),
        follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
        creates = dict(required=False, default=None, type='path'),
        removes = dict(required=False, default=None, type='path'),
        status_code = dict(required=False, default=[200], type='list'),
        timeout = dict(required=False, default=30, type='int'),
        headers = dict(required=False, type='dict', default={})
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        # check_invalid_arguments=False tolerates the legacy HEADER_*
        # parameters, which are folded into the headers dict below.
        check_invalid_arguments=False,
        add_file_common_args=True
    )

    url = module.params['url']
    body = module.params['body']
    body_format = module.params['body_format'].lower()
    method = module.params['method']
    dest = module.params['dest']
    return_content = module.params['return_content']
    creates = module.params['creates']
    removes = module.params['removes']
    status_code = [int(x) for x in list(module.params['status_code'])]
    socket_timeout = module.params['timeout']

    dict_headers = module.params['headers']

    if body_format == 'json':
        # Encode the body unless its a string, then assume it is pre-formatted JSON
        if not isinstance(body, six.string_types):
            body = json.dumps(body)
        lower_header_keys = [key.lower() for key in dict_headers]
        if 'content-type' not in lower_header_keys:
            dict_headers['Content-Type'] = 'application/json'

    # Grab all the http headers. Need this hack since passing multi-values is
    # currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
    for key, value in six.iteritems(module.params):
        if key.startswith("HEADER_"):
            module.deprecate('Supplying headers via HEADER_* is deprecated and '
                             'will be removed in a future version. Please use '
                             '`headers` to supply headers for the request')
            skey = key.replace("HEADER_", "")
            dict_headers[skey] = value

    if creates is not None:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of uri executions.
        if os.path.exists(creates):
            module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, rc=0)

    if removes is not None:
        # do not run the command if the line contains removes=filename
        # and the filename do not exists. This allows idempotence
        # of uri executions.
        if not os.path.exists(removes):
            module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, rc=0)

    # Make the request
    resp, content, dest = uri(module, url, dest, body, body_format, method,
                              dict_headers, socket_timeout)
    resp['status'] = int(resp['status'])

    # Write the file out if requested
    if dest is not None:
        if resp['status'] == 304:
            # Not modified: nothing written, destination unchanged.
            changed = False
        else:
            write_file(module, url, dest, content)
            # allow file attribute changes
            changed = True
            module.params['path'] = dest
            file_args = module.load_file_common_arguments(module.params)
            file_args['path'] = dest
            changed = module.set_fs_attributes_if_different(file_args, changed)
        resp['path'] = dest
    else:
        changed = False

    # Transmogrify the headers, replacing '-' with '_', since variables dont
    # work with dashes.
    # In python3, the headers are title cased. Lowercase them to be
    # compatible with the python2 behaviour.
    uresp = {}
    for key, value in six.iteritems(resp):
        ukey = key.replace("-", "_").lower()
        uresp[ukey] = value

    try:
        uresp['location'] = absolute_location(url, uresp['location'])
    except KeyError:
        pass

    # Default content_encoding to try
    content_encoding = 'utf-8'
    if 'content_type' in uresp:
        content_type, params = cgi.parse_header(uresp['content_type'])
        if 'charset' in params:
            content_encoding = params['charset']
        u_content = to_text(content, encoding=content_encoding)
        if 'application/json' in content_type or 'text/json' in content_type:
            try:
                js = json.loads(u_content)
                uresp['json'] = js
            except:
                # Body claimed to be JSON but is not decodable; return it raw.
                pass
    else:
        u_content = to_text(content, encoding=content_encoding)

    if resp['status'] not in status_code:
        uresp['msg'] = 'Status code was not %s: %s' % (status_code, uresp.get('msg', ''))
        module.fail_json(content=u_content, **uresp)
    elif return_content:
        module.exit_json(changed=changed, content=u_content, **uresp)
    else:
        module.exit_json(changed=changed, **uresp)


if __name__ == '__main__':
    main()
| gpl-3.0 |
jspraul/bite-project | deps/mrtaskman/server/mapreduce/namespace_range.py | 27 | 12792 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents a lexographic range of namespaces."""
import itertools
import string
from google.appengine.api import datastore
from google.appengine.ext import db
from google.appengine.ext.db import metadata
NAMESPACE_CHARACTERS = ''.join(sorted(string.digits +
                                      string.lowercase +
                                      string.uppercase +
                                      '._-'))
MAX_NAMESPACE_LENGTH = 100
MIN_NAMESPACE = ''


def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
                     max_length=MAX_NAMESPACE_LENGTH):
    """Calculate derived constant values. Only useful for testing."""
    global NAMESPACE_CHARACTERS
    global MAX_NAMESPACE_LENGTH
    global MAX_NAMESPACE
    global _LEX_DISTANCE

    NAMESPACE_CHARACTERS = alphabet
    MAX_NAMESPACE_LENGTH = max_length
    MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH

    # _LEX_DISTANCE will contain the lexical distance between two adjacent
    # characters in NAMESPACE_CHARACTERS at each character index. This is
    # used to calculate the ordinal for each string. Example:
    #   NAMESPACE_CHARACTERS = 'ab'
    #   MAX_NAMESPACE_LENGTH = 3
    #   _LEX_DISTANCE = [1, 3, 7]
    #   '' => 0, 'a' => 1, 'aa' => 2, 'aaa' => 3,
    #   'aab' => 4   (distance between 'aaa' and 'aab' is 1)
    #   'ab'  => 5   (distance between 'aa' and 'ab' is 3)
    #   'aba' => 6, 'abb' => 7,
    #   'b'   => 8   (distance between 'a' and 'b' is 7)
    #   'ba' => 9, 'baa' => 10, 'bab' => 11, ...
    #   _namespace_to_ord('bab') = (1*7 + 1) + (0*3 + 1) + (1*1 + 1) = 11
    distances = [1]
    while len(distances) < MAX_NAMESPACE_LENGTH:
        distances.append(distances[-1] * len(NAMESPACE_CHARACTERS) + 1)
    _LEX_DISTANCE = distances


_setup_constants()
def _ord_to_namespace(n, _max_length=None):
"""Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
lexographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
n: A number representing the lexographical ordering of a namespace.
Returns:
A string representing the nth namespace in lexographical order.
"""
if _max_length is None:
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[_max_length - 1]
if n == 0:
return ''
n -= 1
return (NAMESPACE_CHARACTERS[n / length] +
_ord_to_namespace(n % length, _max_length - 1))
def _namespace_to_ord(namespace):
"""Converts a namespace string into an int representing its lexographic order.
>>> _namespace_to_ord('')
''
>>> _namespace_to_ord('_')
1
>>> _namespace_to_ord('__')
2
Args:
namespace: A namespace string.
Returns:
An int representing the lexographical order of the given namespace string.
"""
n = 0
for i, c in enumerate(namespace):
n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *
NAMESPACE_CHARACTERS.index(c)
+ 1)
return n
def _key_for_namespace(namespace, app):
    """Return the __namespace__ key for a namespace.

    Args:
      namespace: The namespace whose key is requested.
      app: The id of the application that the key belongs to.

    Returns:
      A db.Key representing the namespace.
    """
    # The default (empty) namespace is stored under a reserved numeric id
    # rather than its empty-string name.
    name_or_id = namespace or metadata.Namespace.EMPTY_NAMESPACE_ID
    return db.Key.from_path(metadata.Namespace.KIND_NAME,
                            name_or_id,
                            _app=app)
class NamespaceRange(object):
    """An inclusive lexographical range of namespaces.

    This class is immutable.
    """

    def __init__(self,
                 namespace_start=None,
                 namespace_end=None,
                 _app=None):
        """Initializes a NamespaceRange instance.

        Args:
          namespace_start: A string representing the start of the namespace range.
              namespace_start is included in the range. If namespace_start is None
              then the lexographically first namespace is used.
          namespace_end: A string representing the end of the namespace range.
              namespace_end is included in the range and must be >= namespace_start.
              If namespace_end is None then the lexographically last namespace is
              used.
          _app: Optional application id, forwarded to keys/queries built from
              this range.

        Raises:
          ValueError: if namespace_start > namespace_end.
        """
        if namespace_start is None:
            namespace_start = MIN_NAMESPACE

        if namespace_end is None:
            namespace_end = MAX_NAMESPACE

        if namespace_start > namespace_end:
            raise ValueError('namespace_start (%r) > namespace_end (%r)' % (
                namespace_start, namespace_end))
        # Name-mangled attributes back the read-only properties below,
        # keeping instances effectively immutable.
        self.__namespace_start = namespace_start
        self.__namespace_end = namespace_end
        self.__app = _app

    @property
    def app(self):
        # Application id this range was created for (may be None).
        return self.__app

    @property
    def namespace_start(self):
        # Inclusive lower bound of the range.
        return self.__namespace_start

    @property
    def namespace_end(self):
        # Inclusive upper bound of the range.
        return self.__namespace_end

    @property
    def is_single_namespace(self):
        """True if the namespace range only includes a single namespace."""
        return self.namespace_start == self.namespace_end

    def split_range(self):
        """Splits the NamespaceRange into two nearly equal-sized ranges.

        Returns:
          If this NamespaceRange contains a single namespace then a list containing
          this NamespaceRange is returned. Otherwise a two-element list containing
          two NamespaceRanges whose total range is identical to this
          NamespaceRange's is returned.
        """
        if self.is_single_namespace:
            return [self]

        # Bisect in ordinal (integer) space, then map back to namespace
        # strings; the second half starts one ordinal past the midpoint so
        # the halves never overlap.
        mid_point = (_namespace_to_ord(self.namespace_start) +
                     _namespace_to_ord(self.namespace_end)) // 2

        return [NamespaceRange(self.namespace_start,
                               _ord_to_namespace(mid_point),
                               _app=self.app),
                NamespaceRange(_ord_to_namespace(mid_point+1),
                               self.namespace_end,
                               _app=self.app)]

    def __eq__(self, o):
        # NOTE(review): app is ignored here although it participates in
        # __hash__, so two ranges that compare equal may hash differently —
        # confirm whether app should be part of equality.
        return (self.namespace_start == o.namespace_start and
                self.namespace_end == o.namespace_end)

    def __hash__(self):
        return hash((self.namespace_start, self.namespace_end, self.app))

    def __repr__(self):
        if self.app is None:
            return 'NamespaceRange(namespace_start=%r, namespace_end=%r)' % (
                self.namespace_start, self.namespace_end)
        else:
            return 'NamespaceRange(namespace_start=%r, namespace_end=%r, _app=%r)' % (
                self.namespace_start, self.namespace_end, self.app)

    def with_start_after(self, after_namespace):
        """Returns a copy of this NamespaceName with a new namespace_start.

        Args:
          after_namespace: A namespace string.

        Returns:
          A NamespaceRange object whose namespace_start is the lexographically next
          namespace after the given namespace string.

        Raises:
          ValueError: if the NamespaceRange includes only a single namespace.
        """
        namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
        return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)

    def make_datastore_query(self):
        """Returns a datastore.Query that generates all namespaces in the range.

        Returns:
          A datastore.Query instance that generates db.Keys for each namespace in
          the NamespaceRange.
        """
        filters = {}
        filters['__key__ >= '] = _key_for_namespace(
            self.namespace_start, self.app)
        filters['__key__ <= '] = _key_for_namespace(
            self.namespace_end, self.app)

        return datastore.Query('__namespace__',
                               filters=filters,
                               keys_only=True,
                               _app=self.app)

    def normalized_start(self):
        """Returns a NamespaceRange with leading non-existant namespaces removed.

        Returns:
          A copy of this NamespaceRange whose namespace_start is adjusted to exlcude
          the portion of the range that contains no actual namespaces in the
          datastore. None is returned if the NamespaceRange contains no actual
          namespaces in the datastore.
        """
        # Fetch the first existing namespace key in the range (the query is
        # ascending by key).
        namespaces_after_key = self.make_datastore_query().Get(1)

        if not namespaces_after_key:
            return None

        namespace_after_key = namespaces_after_key[0].name() or ''
        return NamespaceRange(namespace_after_key,
                              self.namespace_end,
                              _app=self.app)

    def to_json_object(self):
        """Returns a dict representation that can be serialized to JSON."""
        obj_dict = dict(namespace_start=self.namespace_start,
                        namespace_end=self.namespace_end)
        if self.app is not None:
            obj_dict['app'] = self.app
        return obj_dict

    @classmethod
    def from_json_object(cls, json):
        """Returns a NamespaceRange from an object deserialized from JSON."""
        return cls(json['namespace_start'],
                   json['namespace_end'],
                   _app=json.get('app'))

    # TODO(user): Implement an option where the returned namespace range is
    # not normalized using with_start_after to support consistent namespace
    # queries.
    @classmethod
    def split(cls,
              n,
              contiguous,
              can_query=itertools.chain(itertools.repeat(True, 50),
                                        itertools.repeat(False)).next,
              _app=None):
        """Splits the complete NamespaceRange into n equally-sized NamespaceRanges.

        Args:
          n: The maximum number of NamespaceRanges to return. Fewer than n
              namespaces may be returned.
          contiguous: If True then the returned NamespaceRanges will cover the
              entire space of possible namespaces (i.e. from MIN_NAMESPACE to
              MAX_NAMESPACE) without gaps. If False then the returned
              NamespaceRanges may exclude namespaces that don't appear in the
              datastore.
          can_query: A function that returns True if split() can query the datastore
              to generate more fair namespace range splits, and False otherwise.
              If not set then split() is allowed to make 50 datastore queries.

        Returns:
          A list of at most n NamespaceRanges representing a near-equal distribution
          of actual existant datastore namespaces. The returned list will be sorted
          lexographically.

        Raises:
          ValueError: if n is < 1.
        """
        if n < 1:
            raise ValueError('n must be >= 1')

        ns_range = NamespaceRange(_app=_app)
        if can_query():
            ns_range = ns_range.normalized_start()
            if ns_range is None:
                if contiguous:
                    return [NamespaceRange(_app=_app)]
                else:
                    return []

        # Repeatedly bisect the pending ranges; single-namespace ranges that
        # cannot be split further accumulate in `singles`.
        ranges = [ns_range]
        singles = []
        while ranges and (len(ranges) + len(singles)) < n:
            namespace_range = ranges.pop(0)
            if namespace_range.is_single_namespace:
                singles.append(namespace_range)
            else:
                left, right = namespace_range.split_range()
                if can_query():
                    right = right.normalized_start()
                if right is not None:
                    ranges.append(right)
                ranges.append(left)
        ns_ranges = sorted(singles + ranges,
                           key=lambda ns_range: ns_range.namespace_start)

        if contiguous:
            if not ns_ranges:
                # This condition is possible if every namespace was deleted after the
                # first call to ns_range.normalized_start().
                return [NamespaceRange(_app=_app)]

            # Stretch every range so that consecutive ranges meet exactly and
            # the first/last reach the absolute namespace bounds.
            continuous_ns_ranges = []
            for i in range(len(ns_ranges)):
                if i == 0:
                    namespace_start = MIN_NAMESPACE
                else:
                    namespace_start = ns_ranges[i].namespace_start

                if i == len(ns_ranges) - 1:
                    namespace_end = MAX_NAMESPACE
                else:
                    namespace_end = _ord_to_namespace(
                        _namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)

                continuous_ns_ranges.append(NamespaceRange(namespace_start,
                                                           namespace_end,
                                                           _app=_app))
            return continuous_ns_ranges
        else:
            return ns_ranges
| apache-2.0 |
hackebrot/pytest | testing/test_runner_xunit.py | 1 | 7636 | """
test correct setup/teardowns at
module, class, and instance level
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
def test_module_and_function_setup(testdir):
    # setup_module/setup_function must run before the tests and their
    # effects must be visible to plain functions and to class-based tests.
    reprec = testdir.inline_runsource(
        """
        modlevel = []
        def setup_module(module):
            assert not modlevel
            module.modlevel.append(42)

        def teardown_module(module):
            modlevel.pop()

        def setup_function(function):
            function.answer = 17

        def teardown_function(function):
            del function.answer

        def test_modlevel():
            assert modlevel[0] == 42
            assert test_modlevel.answer == 17

        class TestFromClass(object):
            def test_module(self):
                assert modlevel[0] == 42
                assert not hasattr(test_modlevel, 'answer')
        """
    )
    rep = reprec.matchreport("test_modlevel")
    assert rep.passed
    rep = reprec.matchreport("test_module")
    assert rep.passed
def test_module_setup_failure_no_teardown(testdir):
    # When setup_module raises, teardown_module must NOT run: the values
    # list still contains only the entry appended by the failing setup.
    reprec = testdir.inline_runsource(
        """
        values = []
        def setup_module(module):
            values.append(1)
            0/0

        def test_nothing():
            pass

        def teardown_module(module):
            values.append(2)
        """
    )
    reprec.assertoutcome(failed=1)
    calls = reprec.getcalls("pytest_runtest_setup")
    assert calls[0].item.module.values == [1]
def test_setup_function_failure_no_teardown(testdir):
    # A failing setup_function must not trigger teardown_function.
    reprec = testdir.inline_runsource(
        """
        modlevel = []
        def setup_function(function):
            modlevel.append(1)
            0/0

        def teardown_function(module):
            modlevel.append(2)

        def test_func():
            pass
        """
    )
    calls = reprec.getcalls("pytest_runtest_setup")
    assert calls[0].item.module.modlevel == [1]
def test_class_setup(testdir):
    # setup_class runs for the class and for subclasses, and teardown_class
    # restores the class-level state afterwards (verified by test_cleanup).
    reprec = testdir.inline_runsource(
        """
        class TestSimpleClassSetup(object):
            clslevel = []
            def setup_class(cls):
                cls.clslevel.append(23)

            def teardown_class(cls):
                cls.clslevel.pop()

            def test_classlevel(self):
                assert self.clslevel[0] == 23

        class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
            def test_classlevel_anothertime(self):
                assert self.clslevel == [23]

        def test_cleanup():
            assert not TestSimpleClassSetup.clslevel
            assert not TestInheritedClassSetupStillWorks.clslevel
        """
    )
    # 1 test in the base class + 2 inherited/added + 1 cleanup check.
    reprec.assertoutcome(passed=1 + 2 + 1)
def test_class_setup_failure_no_teardown(testdir):
    # When setup_class raises, teardown_class must not run.
    reprec = testdir.inline_runsource(
        """
        class TestSimpleClassSetup(object):
            clslevel = []
            def setup_class(cls):
                0/0

            def teardown_class(cls):
                cls.clslevel.append(1)

            def test_classlevel(self):
                pass

        def test_cleanup():
            assert not TestSimpleClassSetup.clslevel
        """
    )
    reprec.assertoutcome(failed=1, passed=1)
def test_method_setup(testdir):
    # setup_method receives the bound test method that is about to run.
    reprec = testdir.inline_runsource(
        """
        class TestSetupMethod(object):
            def setup_method(self, meth):
                self.methsetup = meth

            def teardown_method(self, meth):
                del self.methsetup

            def test_some(self):
                assert self.methsetup == self.test_some

            def test_other(self):
                assert self.methsetup == self.test_other
        """
    )
    reprec.assertoutcome(passed=2)
def test_method_setup_failure_no_teardown(testdir):
    # A failing setup_method must not trigger teardown_method.
    reprec = testdir.inline_runsource(
        """
        class TestMethodSetup(object):
            clslevel = []
            def setup_method(self, method):
                self.clslevel.append(1)
                0/0

            def teardown_method(self, method):
                self.clslevel.append(2)

            def test_method(self):
                pass

        def test_cleanup():
            assert TestMethodSetup.clslevel == [1]
        """
    )
    reprec.assertoutcome(failed=1, passed=1)
def test_method_setup_uses_fresh_instances(testdir):
    # Each test method must run on a fresh instance of the test class, so
    # the instance captured by the first test differs from the second's.
    reprec = testdir.inline_runsource(
        """
        class TestSelfState1(object):
            memory = []
            def test_hello(self):
                self.memory.append(self)

            def test_afterhello(self):
                assert self != self.memory[0]
        """
    )
    reprec.assertoutcome(passed=2, failed=0)
def test_setup_that_skips_calledagain(testdir):
    # pytest.skip() inside setup_module skips every test of the module.
    p = testdir.makepyfile(
        """
        import pytest
        def setup_module(mod):
            pytest.skip("x")
        def test_function1():
            pass
        def test_function2():
            pass
        """
    )
    reprec = testdir.inline_run(p)
    reprec.assertoutcome(skipped=2)
def test_setup_fails_again_on_all_tests(testdir):
    # A raising setup_module is reported as a failure for every test of the
    # module, not only for the first one collected.
    p = testdir.makepyfile(
        """
        import pytest
        def setup_module(mod):
            raise ValueError(42)
        def test_function1():
            pass
        def test_function2():
            pass
        """
    )
    reprec = testdir.inline_run(p)
    reprec.assertoutcome(failed=2)
def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
    # When setup_module fails, fixtures must not be instantiated at all:
    # only the module-level error (42) may appear in the output, never the
    # fixture's own error marker ("xyz43").
    p = testdir.makepyfile(
        """
        import pytest
        def setup_module(mod):
            raise ValueError(42)
        @pytest.fixture
        def hello(request):
            raise ValueError("xyz43")
        def test_function1(hello):
            pass
        def test_function2(hello):
            pass
        """
    )
    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines(
        [
            "*function1*",
            "*ValueError*42*",
            "*function2*",
            "*ValueError*42*",
            "*2 error*",
        ]
    )
    assert "xyz43" not in result.stdout.str()
# Runs twice: once with no parameter ("") and once with the classic
# positional argument ("arg") on every xunit-style hook.
@pytest.mark.parametrize("arg", ["", "arg"])
def test_setup_teardown_function_level_with_optional_argument(
    testdir, monkeypatch, arg
):
    """parameter to setup/teardown xunit-style functions parameter is now optional (#1728)."""
    import sys

    # The generated test module records hook invocations through this list,
    # reached via a monkeypatched attribute on the shared sys module.
    trace_setups_teardowns = []
    monkeypatch.setattr(
        sys, "trace_setups_teardowns", trace_setups_teardowns, raising=False
    )
    p = testdir.makepyfile(
        """
        import pytest
        import sys

        trace = sys.trace_setups_teardowns.append

        def setup_module({arg}): trace('setup_module')
        def teardown_module({arg}): trace('teardown_module')

        def setup_function({arg}): trace('setup_function')
        def teardown_function({arg}): trace('teardown_function')

        def test_function_1(): pass
        def test_function_2(): pass

        class Test(object):
            def setup_method(self, {arg}): trace('setup_method')
            def teardown_method(self, {arg}): trace('teardown_method')

            def test_method_1(self): pass
            def test_method_2(self): pass
        """.format(
            arg=arg
        )
    )
    result = testdir.inline_run(p)
    result.assertoutcome(passed=4)

    expected = [
        "setup_module",
        "setup_function",
        "teardown_function",
        "setup_function",
        "teardown_function",
        "setup_method",
        "teardown_method",
        "setup_method",
        "teardown_method",
        "teardown_module",
    ]
    assert trace_setups_teardowns == expected
| mit |
slashdd/sos | sos/report/plugins/manageiq.py | 5 | 2925 | # -*- python -*-
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc., Pep Turró Mauri <pep@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
from os import environ
import os.path
class ManageIQ(Plugin, RedHatPlugin):
    """Collects ManageIQ/CloudForms configuration files and logs."""

    short_desc = 'ManageIQ/CloudForms related information'

    plugin_name = 'manageiq'
    miq_dir = '/var/www/miq/vmdb'

    packages = (
        'cfme',
        'cfme-appliance',
        'cfme-gemset',
        'cfme-appliance-tools',
        'cfme-appliance-common'
    )

    files = (
        os.path.join(miq_dir, 'BUILD'),
        os.path.join(miq_dir, 'GUID'),
        os.path.join(miq_dir, 'VERSION'),
        os.path.join(miq_dir, 'REGION')
    )

    # Config files to collect from miq_dir/config/
    miq_conf_dir = os.path.join(miq_dir, "config")
    miq_conf_files = [
        '*.rb',
        '*.yaml',
        '*.yml',
        '*.yml.db',
        '*.yml.sample',
        'settings/*.yml',
        'environments/*.rb',
        'environments/*.yml',
        'environments/patches/*.rb',
        'initializers/*.rb',
        'database.yml.old',
        'brakeman.ignore',
    ]

    # Log files to collect from miq_dir/log/
    miq_log_dir = os.path.join(miq_dir, "log")

    # These are collected without a size limit (see setup()).
    miq_main_log_files = [
        'ansible_tower.log',
        'top_output.log',
        'evm.log',
        'production.log',
        'automation.log',
    ]

    miq_log_files = [
        '*.log',
        'apache/*.log',
        '*.txt',
        '*.yml',
    ]

    def setup(self):
        """Register ManageIQ config files and logs for collection."""
        if self.get_option("all_logs"):
            # Widen every pattern into a glob so logrotated copies are
            # matched too.  A list comprehension (instead of the previous
            # map()) keeps this a real, re-iterable list on Python 3,
            # where map() returns a single-use iterator.
            self.miq_log_files = [x + '*' for x in self.miq_log_files]

        self.add_copy_spec(list(self.files))

        self.add_copy_spec([
            os.path.join(self.miq_conf_dir, x) for x in self.miq_conf_files
        ])

        # Collect main log files without size limit.
        self.add_copy_spec([
            os.path.join(self.miq_log_dir, x) for x in self.miq_main_log_files
        ], sizelimit=0)

        self.add_copy_spec([
            os.path.join(self.miq_log_dir, x) for x in self.miq_log_files
        ])

        self.add_copy_spec([
            "/var/log/tower.log",
            "/etc/manageiq/postgresql.conf.d/*.conf"
        ])

        # On appliances the PostgreSQL data directory is exported via the
        # environment; grab its logs and config when present.
        if environ.get("APPLIANCE_PG_DATA"):
            pg_dir = environ.get("APPLIANCE_PG_DATA")
            self.add_copy_spec([
                os.path.join(pg_dir, 'pg_log'),
                os.path.join(pg_dir, 'postgresql.conf')
            ])
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
mmolero/pcloudpy | pcloudpy/core/filters/vtkPointSetNormalsEstimation.py | 1 | 5076 | """
Class that define a normal estimation method based on vtkPointSetNormalsEstimation by David Doria
"""
__all__ = ["vtkPointSetNormalsEstimation"]
from pcloudpy.core.filters.base import FilterBase
import numpy as np
from vtk import vtkPolyDataAlgorithm, vtkFloatArray, vtkKdTree, vtkPlane, vtkPolyData, vtkIdList
from scipy.linalg import eigh
FIXED_NUMBER = 0
RADIUS = 1
class vtkPointSetNormalsEstimation(FilterBase):
    """
    vtkPointSetNormalEstimation filter estimates normals of a point set using a local best fit plane.

    At every point in the point set, vtkPointSetNormalEstimation computes the best
    fit plane of the set of points within a specified radius of the point (or a fixed number of neighbors).
    The normal of this plane is used as an estimate of the normal of the surface that would go through
    the points.

    vtkPointSetNormalEstimation Class is a python implementation based on the version included in PointSetProcessing by
    David Doria, see https://github.com/daviddoria/PointSetProcessing
    """
    def __init__(self, number_neighbors=10, mode=0, radius=1):
        # mode selects the neighborhood definition:
        # FIXED_NUMBER -> number_neighbors nearest points,
        # RADIUS       -> every point within self.radius.
        self.mode = mode
        self.number_neighbors = number_neighbors
        self.radius = radius

    def set_mode_to_fixednumber(self):
        """Use a fixed number of nearest neighbors around each point."""
        self.mode = FIXED_NUMBER

    def set_mode_to_radius(self):
        """Use every neighbor within self.radius around each point."""
        self.mode = RADIUS

    def set_input(self, input_data):
        """
        set input data

        Parameters
        ----------
        input_data : vtkPolyData

        Returns
        -------
        is_valid: bool
            Returns True if the input_data is valid for processing
        """
        if isinstance(input_data, vtkPolyData):
            super(vtkPointSetNormalsEstimation, self).set_input(input_data)
            return True
        else:
            return False

    def update(self):
        """Estimate a normal per input point; the result is stored in
        self.output_ as a shallow copy of the input carrying a 'Normals'
        point-data array."""
        normalArray = vtkFloatArray()
        normalArray.SetNumberOfComponents(3)
        normalArray.SetNumberOfTuples(self.input_.GetNumberOfPoints())
        normalArray.SetName("Normals")

        kDTree = vtkKdTree()
        kDTree.BuildLocatorFromPoints(self.input_.GetPoints())

        # Estimate the normal at each point.
        for pointId in range(0, self.input_.GetNumberOfPoints()):
            point = [0.0, 0.0, 0.0]
            self.input_.GetPoint(pointId, point)

            neighborIds = vtkIdList()
            if self.mode == FIXED_NUMBER:
                kDTree.FindClosestNPoints(self.number_neighbors, point, neighborIds)
            elif self.mode == RADIUS:
                kDTree.FindPointsWithinRadius(self.radius, point, neighborIds)

            # If fewer than 3 points were selected (the current point is
            # included in the neighbor set), a plane is not defined;
            # force the use of the 3 closest points instead.
            if neighborIds.GetNumberOfIds() < 3:
                kDTree.FindClosestNPoints(3, point, neighborIds)

            bestPlane = vtkPlane()
            self.best_fit_plane(self.input_.GetPoints(), bestPlane, neighborIds)
            normal = bestPlane.GetNormal()
            normalArray.SetTuple(pointId, normal)

        self.output_ = vtkPolyData()
        self.output_.ShallowCopy(self.input_)
        self.output_.GetPointData().SetNormals(normalArray)

    def best_fit_plane(self, points, bestPlane, idsToUse):
        """Compute the least-squares best-fit plane through the subset of
        *points* selected by *idsToUse* and store it in *bestPlane*."""
        dnumPoints = idsToUse.GetNumberOfIds()

        # Find the center of mass of the points.
        center = self.center_of_mass(points, idsToUse)

        # Accumulate the (scaled) covariance matrix of the neighborhood.
        a = np.zeros((3, 3))
        for pointId in range(0, dnumPoints):
            # Use a float buffer: VTK fills the array in place, and an
            # integer dtype (the original np.asarray([0, 0, 0])) would
            # truncate the coordinates on assignment.
            x = np.zeros(3)
            points.GetPoint(idsToUse.GetId(pointId), x)
            xp = x - center
            a[0, :] += xp[0] * xp
            a[1, :] += xp[1] * xp
            a[2, :] += xp[2] * xp

        # Divide by N-1 (sample covariance).
        a /= (dnumPoints - 1)

        # scipy.linalg.eigh returns eigenvalues in ascending order, so
        # column 0 of eigvec is the eigenvector of the smallest eigenvalue:
        # the direction of least variance, i.e. the plane normal.
        eigval, eigvec = eigh(a, overwrite_a=True)

        bestPlane.SetNormal(eigvec[0, 0], eigvec[1, 0], eigvec[2, 0])

        # Set the plane origin to the center of mass.
        bestPlane.SetOrigin(center[0], center[1], center[2])

    def center_of_mass(self, points, idsToUse):
        """Return the centroid (3-element float array) of the selected points."""
        point = np.zeros(3)
        center = np.zeros(3)

        for i in range(0, idsToUse.GetNumberOfIds()):
            points.GetPoint(idsToUse.GetId(i), point)
            center += point

        numberOfPoints = float(idsToUse.GetNumberOfIds())
        center /= numberOfPoints

        return center
| bsd-3-clause |
AlanZatarain/pysal | pysal/spatial_dynamics/ergodic.py | 5 | 4744 | """
:mod:`ergodic` --- summary measures for ergodic Markov chains
=============================================================
"""
__author__ = "Sergio J. Rey <srey@asu.edu>"
__all__ = ['steady_state', 'fmpt']
import numpy as np
import numpy.linalg as la
def steady_state(P):
    """
    Calculates the steady state probability vector for a regular Markov
    transition matrix P.

    Parameters
    ----------
    P : matrix (kxk)
        an ergodic Markov transition probability matrix

    Returns
    -------
    implicit : matrix (kx1)
        steady state distribution

    Examples
    --------
    Kemeny and Snell's Land of Oz example, with states Rain, Nice, Snow:

    >>> import numpy as np
    >>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
    >>> steady_state(p)
    matrix([[ 0.4],
            [ 0.2],
            [ 0.4]])

    In the long run 40 percent of days are Rain, 20 percent Nice and
    40 percent Snow.
    """
    values, vectors = la.eig(np.transpose(P))
    # For a regular P the maximum eigenvalue is 1; locate its position.
    position = values.tolist().index(max(values))
    column = vectors[:, position]
    # Normalize the matching eigenvector so its entries sum to one.
    return column / sum(column)
def fmpt(P):
    """
    Calculates the matrix of first mean passage times for an
    ergodic transition probability matrix.

    Parameters
    ----------
    P : matrix (kxk)
        an ergodic Markov transition probability matrix

    Returns
    -------
    M : matrix (kxk)
        elements are the expected number of intervals for a chain
        starting in state i to first enter state j; for i=j this is
        the recurrence time

    Examples
    --------
    >>> import numpy as np
    >>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
    >>> fm=fmpt(p)
    >>> fm
    matrix([[ 2.5       ,  4.        ,  3.33333333],
            [ 2.66666667,  5.        ,  2.66666667],
            [ 3.33333333,  4.        ,  2.5       ]])

    Notes
    -----
    Uses formulation (and examples on p. 218) in Kemeny and Snell (1976) [1]_

    References
    ----------
    .. [1] Kemeny, John, G. and J. Laurie Snell (1976) Finite Markov
       Chains. Springer-Verlag. Berlin
    """
    pi = steady_state(P)
    k = pi.shape[0]
    # Build the limiting matrix A: every row equals the steady state.
    A = np.zeros_like(P)
    for col in range(k):
        A[:, col] = pi
    A = A.transpose()
    I = np.identity(k)
    # Fundamental matrix Z = (I - P + A)^-1; P is a numpy matrix, so *
    # below is matrix multiplication.
    Z = la.inv(I - P + A)
    E = np.ones_like(Z)
    D = np.diag(1. / np.diag(A))
    Zdg = np.diag(np.diag(Z))
    return (I - Z + E * Zdg) * D
def var_fmpt(P):
    """
    Variances of first mean passage times for an ergodic transition
    probability matrix.

    Parameters
    ----------
    P : matrix (kxk)
        an ergodic Markov transition probability matrix

    Returns
    -------
    implic : matrix (kxk)
        elements are the variances of the number of intervals required
        for a chain starting in state i to first enter state j

    Examples
    --------
    >>> import numpy as np
    >>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
    >>> vfm=var_fmpt(p)
    >>> vfm
    matrix([[  5.58333333,  12.        ,   6.88888889],
            [  6.22222222,  12.        ,   6.22222222],
            [  6.88888889,  12.        ,   5.58333333]])

    Notes
    -----
    Uses formulation (and examples on p. 83) in Kemeny and Snell (1976) [1]_

    References
    ----------
    .. [1] Kemeny, John, G. and J. Laurie Snell (1976) Finite Markov
       Chains. Springer-Verlag. Berlin
    """
    # Approximate the limiting matrix with a high matrix power of P
    # (P is a numpy matrix, so ** and * are matrix operations).
    limiting = P ** 1000
    _, k = limiting.shape
    ident = np.identity(k)
    Z = la.inv(ident - P + limiting)
    ones = np.ones_like(Z)
    D = np.diag(1. / np.diag(limiting))
    Zdg = np.diag(np.diag(Z))
    M = (ident - Z + ones * Zdg) * D
    ZM = Z * M
    ZMdg = np.diag(np.diag(ZM))
    W = M * (2 * Zdg * D - ident) + 2 * (ZM - ones * ZMdg)
    # Var = E[T^2] - (E[T])^2, elementwise.
    return W - np.multiply(M, M)
| bsd-3-clause |
Curious72/sympy | sympy/tensor/indexed.py | 12 | 17230 | """Module that defines indexed objects
The classes IndexedBase, Indexed and Idx would represent a matrix element
M[i, j] as in the following graph::
1) The Indexed class represents the entire indexed object.
|
___|___
' '
M[i, j]
/ \__\______
| |
| |
| 2) The Idx class represent indices and each Idx can
| optionally contain information about its range.
|
3) IndexedBase represents the `stem' of an indexed object, here `M'.
The stem used by itself is usually taken to represent the entire
array.
There can be any number of indices on an Indexed object. No
transformation properties are implemented in these Base objects, but
implicit contraction of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer
expressions as indices is limited. (This should be improved in
future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> M = IndexedBase('M')
>>> i, j = symbols('i j', cls=Idx)
>>> M[i, j]
M[i, j]
Repeated indices in a product implies a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
x[j]*M[i, j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
(m, n)
>>> M[i, j].ranges
[(0, m - 1), (0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[(0, m - 1), None, (0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
(set([i]), {})
>>> get_contraction_structure(A[i, j, j])
{(j,): set([A[i, j, j]])}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from __future__ import print_function, division
from sympy.core import Expr, Tuple, Symbol, sympify, S
from sympy.core.compatibility import is_sequence, string_types, NotIterable, range
class IndexException(Exception):
    """Raised for indexing problems on Indexed objects: a missing index,
    a rank mismatch in IndexedBase.__getitem__, or an Idx whose range is
    undefined when a shape must be inferred."""
    pass
class Indexed(Expr):
    """Represents a mathematical object with indices.

    >>> from sympy.tensor import Indexed, IndexedBase, Idx
    >>> from sympy import symbols
    >>> i, j = symbols('i j', cls=Idx)
    >>> Indexed('A', i, j)
    A[i, j]

    It is recommended that Indexed objects are created via IndexedBase:

    >>> A = IndexedBase('A')
    >>> Indexed('A', i, j) == A[i, j]
    True
    """
    is_commutative = True

    def __new__(cls, base, *args):
        # args are the indices; the base is stored as args[0] so the whole
        # object participates in SymPy's args/caching protocol.
        from sympy.utilities.misc import filldedent

        if not args:
            raise IndexException("Indexed needs at least one index.")
        if isinstance(base, (string_types, Symbol)):
            # Allow a plain name; it is promoted to an IndexedBase.
            base = IndexedBase(base)
        elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase):
            raise TypeError(filldedent("""
                Indexed expects string, Symbol or IndexedBase as base."""))
        # Indices are sympified so plain ints/strings become SymPy objects.
        args = list(map(sympify, args))
        return Expr.__new__(cls, base, *args)

    @property
    def base(self):
        """Returns the IndexedBase of the Indexed object.

        Examples
        ========

        >>> from sympy.tensor import Indexed, IndexedBase, Idx
        >>> from sympy import symbols
        >>> i, j = symbols('i j', cls=Idx)
        >>> Indexed('A', i, j).base
        A
        >>> B = IndexedBase('B')
        >>> B == B[i, j].base
        True
        """
        return self.args[0]

    @property
    def indices(self):
        """
        Returns the indices of the Indexed object.

        Examples
        ========

        >>> from sympy.tensor import Indexed, Idx
        >>> from sympy import symbols
        >>> i, j = symbols('i j', cls=Idx)
        >>> Indexed('A', i, j).indices
        (i, j)
        """
        return self.args[1:]

    @property
    def rank(self):
        """
        Returns the rank (number of indices) of the Indexed object.

        Examples
        ========

        >>> from sympy.tensor import Indexed, Idx
        >>> from sympy import symbols
        >>> i, j, k, l, m = symbols('i:m', cls=Idx)
        >>> Indexed('A', i, j).rank
        2
        >>> q = Indexed('A', i, j, k, l, m)
        >>> q.rank
        5
        >>> q.rank == len(q.indices)
        True
        """
        # args[0] is the base, so the indices are args[1:].
        return len(self.args) - 1

    @property
    def shape(self):
        """Returns a list with dimensions of each index.

        Dimensions is a property of the array, not of the indices.  Still, if
        the IndexedBase does not define a shape attribute, it is assumed that
        the ranges of the indices correspond to the shape of the array.

        >>> from sympy.tensor.indexed import IndexedBase, Idx
        >>> from sympy import symbols
        >>> n, m = symbols('n m', integer=True)
        >>> i = Idx('i', m)
        >>> j = Idx('j', m)
        >>> A = IndexedBase('A', shape=(n, n))
        >>> B = IndexedBase('B')
        >>> A[i, j].shape
        (n, n)
        >>> B[i, j].shape
        (m, m)
        """
        from sympy.utilities.misc import filldedent

        # An explicit shape on the base always wins over index ranges.
        if self.base.shape:
            return self.base.shape
        try:
            return Tuple(*[i.upper - i.lower + 1 for i in self.indices])
        except AttributeError:
            # An index without upper/lower attributes (e.g. a plain Symbol).
            raise IndexException(filldedent("""
                Range is not defined for all indices in: %s""" % self))
        except TypeError:
            # upper/lower exist but are None, so the arithmetic fails.
            raise IndexException(filldedent("""
                Shape cannot be inferred from Idx with
                undefined range: %s""" % self))

    @property
    def ranges(self):
        """Returns a list of tuples with lower and upper range of each index.

        If an index does not define the data members upper and lower, the
        corresponding slot in the list contains ``None`` instead of a tuple.

        Examples
        ========

        >>> from sympy import Indexed,Idx, symbols
        >>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
        [(0, 1), (0, 3), (0, 7)]
        >>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
        [(0, 2), (0, 2), (0, 2)]
        >>> x, y, z = symbols('x y z', integer=True)
        >>> Indexed('A', x, y, z).ranges
        [None, None, None]
        """
        ranges = []
        for i in self.indices:
            try:
                ranges.append(Tuple(i.lower, i.upper))
            except AttributeError:
                # Index is not an Idx; its range is unknown.
                ranges.append(None)
        return ranges

    def _sympystr(self, p):
        # Printed as base[i, j, ...].
        indices = list(map(p.doprint, self.indices))
        return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
class IndexedBase(Expr, NotIterable):
    """Represent the base or stem of an indexed object

    The IndexedBase class represent an array that contains elements. The main purpose
    of this class is to allow the convenient creation of objects of the Indexed
    class.  The __getitem__ method of IndexedBase returns an instance of
    Indexed.  Alone, without indices, the IndexedBase class can be used as a
    notation for e.g. matrix equations, resembling what you could do with the
    Symbol class.  But, the IndexedBase class adds functionality that is not
    available for Symbol instances:

    -  An IndexedBase object can optionally store shape information.  This can
       be used in to check array conformance and conditions for numpy
       broadcasting.  (TODO)
    -  An IndexedBase object implements syntactic sugar that allows easy symbolic
       representation of array operations, using implicit summation of
       repeated indices.
    -  The IndexedBase object symbolizes a mathematical structure equivalent
       to arrays, and is recognized as such for code generation and automatic
       compilation and wrapping.

    >>> from sympy.tensor import IndexedBase, Idx
    >>> from sympy import symbols
    >>> A = IndexedBase('A'); A
    A
    >>> type(A)
    <class 'sympy.tensor.indexed.IndexedBase'>

    When an IndexedBase object receives indices, it returns an array with named
    axes, represented by an Indexed object:

    >>> i, j = symbols('i j', integer=True)
    >>> A[i, j, 2]
    A[i, j, 2]
    >>> type(A[i, j, 2])
    <class 'sympy.tensor.indexed.Indexed'>

    The IndexedBase constructor takes an optional shape argument.  If given,
    it overrides any shape information in the indices. (But not the index
    ranges!)

    >>> m, n, o, p = symbols('m n o p', integer=True)
    >>> i = Idx('i', m)
    >>> j = Idx('j', n)
    >>> A[i, j].shape
    (m, n)
    >>> B = IndexedBase('B', shape=(o, p))
    >>> B[i, j].shape
    (o, p)
    """
    is_commutative = True

    def __new__(cls, label, shape=None, **kw_args):
        # The label is normalized to a Symbol; anything else is rejected.
        if isinstance(label, string_types):
            label = Symbol(label)
        elif isinstance(label, Symbol):
            pass
        else:
            raise TypeError("Base label should be a string or Symbol.")

        if is_sequence(shape):
            shape = Tuple(*shape)
        else:
            shape = sympify(shape)
        # The shape participates in .args only when given, so two bases with
        # the same label but different shapes are distinct expressions.
        if shape is not None:
            obj = Expr.__new__(cls, label, shape, **kw_args)
        else:
            obj = Expr.__new__(cls, label, **kw_args)
        obj._shape = shape
        return obj

    def __getitem__(self, indices, **kw_args):
        if is_sequence(indices):
            # Special case needed because M[*my_tuple] is a syntax error.
            if self.shape and len(self.shape) != len(indices):
                raise IndexException("Rank mismatch.")
            return Indexed(self, *indices, **kw_args)
        else:
            # Single index: a declared shape must then be one-dimensional.
            if self.shape and len(self.shape) != 1:
                raise IndexException("Rank mismatch.")
            return Indexed(self, indices, **kw_args)

    @property
    def shape(self):
        """Returns the shape of the IndexedBase object.

        Examples
        ========

        >>> from sympy import IndexedBase, Idx, Symbol
        >>> from sympy.abc import x, y
        >>> IndexedBase('A', shape=(x, y)).shape
        (x, y)

        Note: If the shape of the IndexedBase is specified, it will override
        any shape information given by the indices.

        >>> A = IndexedBase('A', shape=(x, y))
        >>> B = IndexedBase('B')
        >>> i = Idx('i', 2)
        >>> j = Idx('j', 1)
        >>> A[i, j].shape
        (x, y)
        >>> B[i, j].shape
        (2, 1)
        """
        return self._shape

    @property
    def label(self):
        """Returns the label of the IndexedBase object.

        Examples
        ========

        >>> from sympy import IndexedBase
        >>> from sympy.abc import x, y
        >>> IndexedBase('A', shape=(x, y)).label
        A
        """
        return self.args[0]

    def _sympystr(self, p):
        # Prints only the label, e.g. "A".
        return p.doprint(self.label)
class Idx(Expr):
    """Represents an integer index as an Integer or integer expression.

    There are a number of ways to create an Idx object.  The constructor
    takes two arguments:

    ``label``
        An integer or a symbol that labels the index.
    ``range``
        Optionally you can specify a range as either

        - Symbol or integer: This is interpreted as a dimension. Lower and
          upper bounds are set to 0 and range - 1, respectively.
        - tuple: The two elements are interpreted as the lower and upper
          bounds of the range, respectively.

    Note: the Idx constructor is rather pedantic in that it only accepts
    integer arguments.  The only exception is that you can use oo and -oo to
    specify an unbounded range.  For all other cases, both label and bounds
    must be declared as integers, e.g. if n is given as an argument then
    n.is_integer must return True.

    For convenience, if the label is given as a string it is automatically
    converted to an integer symbol.  (Note: this conversion is not done for
    range or dimension arguments.)

    Examples
    ========

    >>> from sympy.tensor import Idx
    >>> from sympy import symbols, oo
    >>> n, i, L, U = symbols('n i L U', integer=True)

    If a string is given for the label an integer Symbol is created and the
    bounds are both None:

    >>> idx = Idx('qwerty'); idx
    qwerty
    >>> idx.lower, idx.upper
    (None, None)

    Both upper and lower bounds can be specified:

    >>> idx = Idx(i, (L, U)); idx
    i
    >>> idx.lower, idx.upper
    (L, U)

    When only a single bound is given it is interpreted as the dimension
    and the lower bound defaults to 0:

    >>> idx = Idx(i, n); idx.lower, idx.upper
    (0, n - 1)
    >>> idx = Idx(i, 4); idx.lower, idx.upper
    (0, 3)
    >>> idx = Idx(i, oo); idx.lower, idx.upper
    (0, oo)
    """
    is_integer = True

    def __new__(cls, label, range=None, **kw_args):
        from sympy.utilities.misc import filldedent

        # A string label becomes an integer Symbol of the same name.
        if isinstance(label, string_types):
            label = Symbol(label, integer=True)
        label, range = list(map(sympify, (label, range)))

        if label.is_Number:
            # A literal number collapses to itself; no Idx wrapper is built.
            if not label.is_integer:
                raise TypeError("Index is not an integer number.")
            return label

        if not label.is_integer:
            raise TypeError("Idx object requires an integer label.")

        elif is_sequence(range):
            # Explicit (lower, upper) bounds.
            if len(range) != 2:
                raise ValueError(filldedent("""
                    Idx range tuple must have length 2, but got %s""" % len(range)))
            for bound in range:
                # oo and -oo are the only non-integer bounds allowed.
                if not (bound.is_integer or abs(bound) is S.Infinity):
                    raise TypeError("Idx object requires integer bounds.")
            args = label, Tuple(*range)
        elif isinstance(range, Expr):
            # A single expression is the dimension: bounds are (0, range-1).
            if not (range.is_integer or range is S.Infinity):
                raise TypeError("Idx object requires an integer dimension.")
            args = label, Tuple(0, range - 1)
        elif range:
            raise TypeError(filldedent("""
                The range must be an ordered iterable or
                integer SymPy expression."""))
        else:
            # No range: bounds stay undefined (lower/upper return None).
            args = label,

        obj = Expr.__new__(cls, *args, **kw_args)
        return obj

    @property
    def label(self):
        """Returns the label (Integer or integer expression) of the Idx object.

        Examples
        ========

        >>> from sympy import Idx, Symbol
        >>> x = Symbol('x', integer=True)
        >>> Idx(x).label
        x
        >>> j = Symbol('j', integer=True)
        >>> Idx(j).label
        j
        >>> Idx(j + 1).label
        j + 1
        """
        return self.args[0]

    @property
    def lower(self):
        """Returns the lower bound of the Index.

        Examples
        ========

        >>> from sympy import Idx
        >>> Idx('j', 2).lower
        0
        >>> Idx('j', 5).lower
        0
        >>> Idx('j').lower is None
        True
        """
        try:
            return self.args[1][0]
        except IndexError:
            # No range stored: implicitly returns None.
            return

    @property
    def upper(self):
        """Returns the upper bound of the Index.

        Examples
        ========

        >>> from sympy import Idx
        >>> Idx('j', 2).upper
        1
        >>> Idx('j', 5).upper
        4
        >>> Idx('j').upper is None
        True
        """
        try:
            return self.args[1][1]
        except IndexError:
            # No range stored: implicitly returns None.
            return

    def _sympystr(self, p):
        # Only the label is printed; the range is suppressed.
        return p.doprint(self.label)
| bsd-3-clause |
LiuRoy/dracula | dracula/thrift/decode.py | 1 | 17917 | # -*- coding=utf8 -*-
import struct
from cStringIO import StringIO
from .common import (
VERSION_MASK,
VERSION_1,
TType,
TState,
TError,
BASIC_TYPE,
ParseData,
)
def unpack_i8(buf):
    """Decode a big-endian signed 8-bit integer from *buf*."""
    (value,) = struct.unpack("!b", buf)
    return value
def unpack_i16(buf):
    """Decode a big-endian signed 16-bit integer from *buf*."""
    (value,) = struct.unpack("!h", buf)
    return value
def unpack_i32(buf):
    """Decode a big-endian signed 32-bit integer from *buf*."""
    (value,) = struct.unpack("!i", buf)
    return value
def unpack_i64(buf):
    """Decode a big-endian signed 64-bit integer from *buf*."""
    (value,) = struct.unpack("!q", buf)
    return value
def unpack_double(buf):
    """Decode a big-endian IEEE-754 double from *buf*."""
    (value,) = struct.unpack("!d", buf)
    return value
class Decoder(object):
    """Incremental Thrift binary-protocol decoder.

    Bytes are fed in via :meth:`parse`; the decoder keeps enough state
    (current parse state, leftover bytes, a stack of partially-filled
    struct/container frames) to resume when more data arrives.

    Bug fix: ``read()`` returns ``None`` when not enough bytes are
    available, but several states tested ``if not read_data`` instead of
    ``if read_data is None``.  A successfully parsed falsy value -- a BOOL
    ``False``, a numeric ``0`` from :meth:`read_basic`, or an empty string
    (size 0) -- was therefore mistaken for "need more data", breaking the
    loop after the bytes had already been consumed and corrupting the
    stream.  All shortage checks now use ``is None``.
    """

    def __init__(self, service, handler, strict=True, decode_response=True):
        self.service = service
        self.handler = handler
        self.strict = strict
        self.decode_response = decode_response
        self.current_state = TState.S_VERSION
        self.latest_result = None
        self.parse_data = ParseData()
        self.error_code = TError.NO_ERROR
        self._left = ''            # bytes left over from an incomplete read
        self._buf = None           # StringIO over the data being parsed
        self._process_stack = []   # frames for structs/containers being filled

    def read(self, size):
        """Read ``size`` bytes from the buffer.

        Args:
            size (int): number of bytes to read

        Returns the bytes on success.  Returns ``None`` when fewer than
        ``size`` bytes are available, stashing the partial bytes in
        ``self._left`` for the next :meth:`parse` call.
        """
        result = self._buf.read(size)
        if len(result) != size:
            self._left = result
            self._buf = None
            return None
        return result

    def read_basic(self, f_type):
        """Read and decode one basic-typed value.

        Args:
            f_type (int): the TType of the field

        Returns the decoded value, or ``None`` when the buffer is short
        (propagated from :meth:`read`) or the type is not basic.
        """
        if f_type == TType.BOOL:
            read_data = self.read(1)
            if read_data:
                return bool(unpack_i8(read_data))
        elif f_type == TType.BYTE:
            read_data = self.read(1)
            if read_data:
                return unpack_i8(read_data)
        elif f_type == TType.I16:
            read_data = self.read(2)
            if read_data:
                return unpack_i16(read_data)
        elif f_type == TType.I32:
            read_data = self.read(4)
            if read_data:
                return unpack_i32(read_data)
        elif f_type == TType.I64:
            read_data = self.read(8)
            if read_data:
                return unpack_i64(read_data)
        elif f_type == TType.DOUBLE:
            read_data = self.read(8)
            if read_data:
                return unpack_double(read_data)

    def next_state(self, f_type, f_spec=None):
        """Choose the next parse state from the upcoming field's type.

        Args:
            f_type (int): the field's TType
            f_spec (class): for STRUCT fields, the struct class to fill
        """
        if f_type in BASIC_TYPE:
            self.current_state = TState.S_READ_BASIC
        elif f_type == TType.STRING:
            self.current_state = TState.S_READ_STRING_SIZE
        elif f_type in (TType.LIST, TType.SET):
            self.current_state = TState.S_READ_LIST_TYPE
        elif f_type == TType.MAP:
            self.current_state = TState.S_READ_MAP_KEY_TYPE
        elif f_type == TType.STRUCT:
            self.current_state = TState.S_READ_FIELD_TYPE
            # Push a fresh struct frame; it is popped when STOP is read.
            self._process_stack.append(
                [TState.S_READ_FIELD_TYPE, f_spec(), -1, None])
        else:
            self.error_code = TError.INTERNAL_ERROR

    def pop_stack(self):
        """Unwind completed container frames, propagating each finished
        value (list or dict) into its parent frame.

        NOTE(review): when a propagated value lands in a parent that still
        expects more elements, the loop can revisit the same ``result`` on
        the next iteration -- looks like a latent duplicate-append for
        nested containers; behavior kept unchanged pending verification.
        """
        result = None
        while True:
            top = self._process_stack[-1]
            if top[0] == TState.S_READ_LIST_TYPE:
                if top[2] <= 0:
                    # List complete: pop it and hand the value upward.
                    self._process_stack.pop()
                    result = top[1]
                elif result is not None:
                    top[1].append(result)
                    top[2] -= 1
                else:
                    break
            elif top[0] == TState.S_READ_MAP_KEY_TYPE:
                if top[2] <= 0:
                    # Map complete: materialize the (key, value) pairs.
                    self._process_stack.pop()
                    result = dict(top[1])
                elif result is not None:
                    if not top[1]:
                        # First key of the map.
                        top[1].append((result, None))
                        break
                    else:
                        if top[1][-1][1] is None:
                            # Pending key gets its value.
                            key, val = top[1].pop()
                            top[1].append((key, result))
                            top[2] -= 1
                        else:
                            # Start a new pair with this key.
                            top[1].append((result, None))
                            break
                else:
                    break
            else:
                # Struct frame: store the finished container in its field.
                if result is not None:
                    f_name = top[3][0]
                    setattr(top[1], f_name, result)
                break

    def get_type_spec(self):
        """Return the (spec, type) to parse next, derived from the frame on
        top of the stack (for maps: key spec until the pending key has a
        value, then value spec)."""
        top = self._process_stack[-1]
        if top[0] in (TState.S_READ_LIST_TYPE,
                      TState.S_READ_FIELD_TYPE):
            return top[3][1], top[3][2]
        if top[1]:
            if top[1][-1][1] is None:
                # A key is pending: the value's spec/type is next.
                return top[3][3], top[3][4]
            return top[3][1], top[3][2]
        return top[3][1], top[3][2]

    def parse(self, data):
        """Feed binary data into the state machine.

        Args:
            data (string): the raw bytes to parse

        Returns the fully parsed request args object once the top-level
        struct's STOP field is reached, else ``None`` (waiting for more
        data or stopped on ``self.error_code``).
        """
        self._buf = StringIO(self._left + data)
        result = None
        while True:
            if self.error_code != TError.NO_ERROR:
                break

            if self.current_state == TState.S_VERSION:
                read_data = self.read(4)
                if read_data is None:
                    break
                self.latest_result = unpack_i32(read_data)
                if self.latest_result < 0:
                    # Strict framing: high bits carry the protocol version.
                    version = self.latest_result & VERSION_MASK
                    if version != VERSION_1:
                        self.error_code = TError.BAD_VERSION
                    self.current_state = TState.S_METHOD_SIZE
                else:
                    # Old-style framing: this word was the name length.
                    if self.strict:
                        self.error_code = TError.NO_PROTOCOL
                    self.current_state = TState.S_METHOD_NAME2
            elif self.current_state == TState.S_METHOD_SIZE:
                read_data = self.read(4)
                if read_data is None:
                    break
                self.latest_result = unpack_i32(read_data)
                self.current_state = TState.S_METHOD_NAME
            elif self.current_state == TState.S_METHOD_NAME:
                read_data = self.read(self.latest_result)
                if read_data is None:
                    break
                self.parse_data.method_name = read_data.decode('utf8')
                self.current_state = TState.S_SEQUENCE_ID
            elif self.current_state == TState.S_METHOD_NAME2:
                read_data = self.read(self.latest_result)
                if read_data is None:
                    break
                self.parse_data.method_name = read_data.decode('utf8')
                self.current_state = TState.S_METHOD_TYPE
            elif self.current_state == TState.S_METHOD_TYPE:
                # Message type byte (unframed protocol); value is discarded.
                read_data = self.read(1)
                if read_data is None:
                    break
                self.current_state = TState.S_SEQUENCE_ID
            elif self.current_state == TState.S_SEQUENCE_ID:
                read_data = self.read(4)
                if read_data is None:
                    break
                self.parse_data.sequence_id = unpack_i32(read_data)
                if self.parse_data.method_name not in \
                        self.service.thrift_services:
                    self.error_code = TError.UNKNOWN_METHOD
                else:
                    # Instantiate the generated args/result holders and
                    # start decoding the top-level args struct.
                    self.parse_data.method_args = \
                        getattr(self.service, self.parse_data.method_name + "_args")()
                    self.parse_data.method_result = \
                        getattr(self.service, self.parse_data.method_name + "_result")()
                    self.current_state = TState.S_READ_FIELD_TYPE
                    self._process_stack.append([TState.S_READ_FIELD_TYPE,
                                                self.parse_data.method_args,
                                                -1, None])
            elif self.current_state == TState.S_READ_FIELD_TYPE:
                read_data = self.read(1)
                if read_data is None:
                    break
                self.latest_result = unpack_i8(read_data)
                if self.latest_result == TType.STOP:
                    # Current struct finished: pop it and propagate upward.
                    top = self._process_stack.pop()
                    if self._process_stack:
                        stack_top = self._process_stack[-1]
                        if stack_top[0] == TState.S_READ_LIST_TYPE:
                            stack_top[2] -= 1
                            stack_top[1].append(top[1])
                            self.pop_stack()
                        elif stack_top[0] == TState.S_READ_MAP_KEY_TYPE:
                            if not stack_top[1]:
                                stack_top[1].append((top[1], None))
                            else:
                                if stack_top[1][-1][1] is None:
                                    stack_top[2] -= 1
                                    top_pair = stack_top[1].pop()
                                    stack_top[1].append(
                                        (top_pair[0], top[1]))
                                    self.pop_stack()
                                else:
                                    stack_top[1].append((top[1], None))
                        else:
                            f_name = stack_top[3][0]
                            setattr(stack_top[1], f_name, top[1])
                        if self._process_stack[-1][0] in (
                                TState.S_READ_LIST_TYPE,
                                TState.S_READ_MAP_KEY_TYPE):
                            f_spec, f_type = self.get_type_spec()
                            self.next_state(f_type, f_spec=f_spec)
                        else:
                            self.current_state = self._process_stack[-1][0]
                    else:
                        # Top-level struct done: the message is complete.
                        result = top[1]
                        self.current_state = TState.S_PARSE_DONE
                else:
                    self.current_state = TState.S_READ_FIELD_ID
            elif self.current_state == TState.S_READ_FIELD_ID:
                read_data = self.read(2)
                if read_data is None:
                    break
                f_type, fid = self.latest_result, unpack_i16(read_data)
                obj = self._process_stack[-1][1]
                if fid not in obj.thrift_spec:
                    self.error_code = TError.SKIP_ERROR
                else:
                    # thrift_spec entries are 3-tuples for basic fields and
                    # 4-tuples (with a container/struct spec) otherwise.
                    if len(obj.thrift_spec[fid]) == 3:
                        sf_type, f_name, f_req = obj.thrift_spec[fid]
                        f_container_spec = None
                    else:
                        sf_type, f_name, f_container_spec, f_req = obj.thrift_spec[fid]
                    if sf_type != f_type:
                        self.error_code = TError.SKIP_ERROR
                    else:
                        self._process_stack[-1][3] = (f_name, f_container_spec, f_type)
                        self.next_state(f_type, f_spec=f_container_spec)
            elif self.current_state == TState.S_READ_BASIC:
                _, f_type = self.get_type_spec()
                read_data = self.read_basic(f_type)
                # Must be "is None": False/0 are valid decoded values.
                if read_data is None:
                    break
                stack_top = self._process_stack[-1]
                if stack_top[0] == TState.S_READ_LIST_TYPE:
                    stack_top[2] -= 1
                    stack_top[1].append(read_data)
                    self.pop_stack()
                elif stack_top[0] == TState.S_READ_MAP_KEY_TYPE:
                    if not stack_top[1]:
                        stack_top[1].append((read_data, None))
                    else:
                        if stack_top[1][-1][1] is None:
                            stack_top[2] -= 1
                            top_pair = stack_top[1].pop()
                            stack_top[1].append((top_pair[0], read_data))
                            self.pop_stack()
                        else:
                            stack_top[1].append((read_data, None))
                else:
                    f_name = stack_top[3][0]
                    setattr(stack_top[1], f_name, read_data)
                if self._process_stack[-1][0] in (TState.S_READ_LIST_TYPE,
                                                  TState.S_READ_MAP_KEY_TYPE):
                    f_spec, f_type = self.get_type_spec()
                    self.next_state(f_type, f_spec=f_spec)
                else:
                    self.current_state = self._process_stack[-1][0]
            elif self.current_state == TState.S_READ_STRING_SIZE:
                read_data = self.read(4)
                if read_data is None:
                    break
                self.latest_result = unpack_i32(read_data)
                self.current_state = TState.S_READ_STRING
            elif self.current_state == TState.S_READ_STRING:
                read_data = self.read(self.latest_result)
                # Must be "is None": a zero-length string reads as ''.
                if read_data is None:
                    break
                if self.decode_response:
                    try:
                        read_data = read_data.decode('utf-8')
                    except UnicodeDecodeError:
                        # Leave non-UTF-8 payloads as raw bytes.
                        pass
                stack_top = self._process_stack[-1]
                if stack_top[0] == TState.S_READ_LIST_TYPE:
                    stack_top[2] -= 1
                    stack_top[1].append(read_data)
                    self.pop_stack()
                elif stack_top[0] == TState.S_READ_MAP_KEY_TYPE:
                    if not stack_top[1]:
                        stack_top[1].append((read_data, None))
                    else:
                        if stack_top[1][-1][1] is None:
                            stack_top[2] -= 1
                            top_pair = stack_top[1].pop()
                            stack_top[1].append((top_pair[0], read_data))
                            self.pop_stack()
                        else:
                            stack_top[1].append((read_data, None))
                else:
                    f_name = stack_top[3][0]
                    setattr(stack_top[1], f_name, read_data)
                if self._process_stack[-1][0] in (TState.S_READ_LIST_TYPE,
                                                  TState.S_READ_MAP_KEY_TYPE):
                    f_spec, f_type = self.get_type_spec()
                    self.next_state(f_type, f_spec=f_spec)
                else:
                    self.current_state = self._process_stack[-1][0]
            elif self.current_state == TState.S_READ_LIST_TYPE:
                read_data = self.read(1)
                if read_data is None:
                    break
                self.latest_result = unpack_i8(read_data)
                self.current_state = TState.S_READ_LIST_SIZE
            elif self.current_state == TState.S_READ_LIST_SIZE:
                read_data = self.read(4)
                if read_data is None:
                    break
                r_type, size = self.latest_result, unpack_i32(read_data)
                spec, _ = self.get_type_spec()
                if isinstance(spec, tuple):
                    v_type, v_spec = spec[0], spec[1]
                else:
                    v_type, v_spec = spec, None
                if r_type != v_type:
                    self.error_code = TError.SKIP_ERROR
                else:
                    self._process_stack.append([TState.S_READ_LIST_TYPE, [],
                                                size, (None, v_spec, v_type)])
                    if size <= 0:
                        # Empty container: resolve it immediately.
                        self.pop_stack()
                        self.current_state = self._process_stack[-1][0]
                    else:
                        self.next_state(v_type, f_spec=v_spec)
            elif self.current_state == TState.S_READ_MAP_KEY_TYPE:
                read_data = self.read(1)
                if read_data is None:
                    break
                self.latest_result = unpack_i8(read_data)
                self.current_state = TState.S_READ_MAP_VALUE_TYPE
            elif self.current_state == TState.S_READ_MAP_VALUE_TYPE:
                read_data = self.read(1)
                if read_data is None:
                    break
                # Keep (key_type, value_type) together for the size state.
                self.latest_result = self.latest_result, unpack_i8(read_data)
                self.current_state = TState.S_READ_MAP_SIZE
            elif self.current_state == TState.S_READ_MAP_SIZE:
                read_data = self.read(4)
                if read_data is None:
                    break
                size = unpack_i32(read_data)
                sk_type, sv_type = self.latest_result
                spec, _ = self.get_type_spec()
                if isinstance(spec[0], int):
                    k_type = spec[0]
                    k_spec = None
                else:
                    k_type, k_spec = spec[0]
                if isinstance(spec[1], int):
                    v_type = spec[1]
                    v_spec = None
                else:
                    v_type, v_spec = spec[1]
                if sk_type != k_type or sv_type != v_type:
                    self.error_code = TError.SKIP_ERROR
                else:
                    self._process_stack.append([TState.S_READ_MAP_KEY_TYPE, [],
                                                size, (None, k_spec, k_type, v_spec, v_type)])
                    if size <= 0:
                        # Empty map: resolve it immediately.
                        self.pop_stack()
                        self.current_state = self._process_stack[-1][0]
                    else:
                        self.next_state(k_type, f_spec=k_spec)
            elif self.current_state == TState.S_PARSE_DONE:
                break
            else:
                self.error_code = TError.INTERNAL_ERROR
        return result
| apache-2.0 |
RevelSystems/django | tests/admin_checks/tests.py | 24 | 22025 | from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from django.core import checks
from django.test import TestCase, override_settings
from .models import Album, Book, City, Influence, Song, State, TwoAlbumFKAndAnE
class SongForm(forms.ModelForm):
    """Bare ModelForm used as the custom form of the admin classes below;
    the model/fields are supplied by the ModelAdmin under test."""
    pass
class ValidFields(admin.ModelAdmin):
    """ModelAdmin combining a custom form with an explicit ``fields`` list;
    exercised by test_custom_modelforms_with_fields_fieldsets."""
    form = SongForm
    fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
    """ModelAdmin whose overridden get_form() adds an extra field ('name')
    that the fieldsets reference; fieldset checks must be skipped when
    get_form() is overridden (refs #19445)."""

    def get_form(self, request, obj=None, **kwargs):
        # The extra 'name' field only exists on this dynamically built form.
        class ExtraFieldForm(SongForm):
            name = forms.CharField(max_length=50)
        return ExtraFieldForm

    fieldsets = (
        (None, {
            'fields': ('name',),
        }),
    )
class MyAdmin(admin.ModelAdmin):
    """ModelAdmin whose check() always reports one error; used to verify
    that admin checks are collected by the system-check framework."""

    @classmethod
    def check(cls, model, **kwargs):
        return ['error!']
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks']
)
class SystemChecksTestCase(TestCase):
    @override_settings(DEBUG=True)
    def test_checks_are_performed(self):
        # Registering a ModelAdmin whose check() reports an error must
        # surface that error through checks.run_checks().
        admin.site.register(Song, MyAdmin)
        try:
            errors = checks.run_checks()
            expected = ['error!']
            self.assertEqual(errors, expected)
        finally:
            # Always unregister and clear accumulated check errors so other
            # tests see a clean admin site.
            admin.site.unregister(Song)
            admin.sites.system_check_errors = []
    @override_settings(DEBUG=True)
    def test_custom_adminsite(self):
        # Checks must also run for ModelAdmins registered on a custom
        # AdminSite, not only on the default admin.site.
        class CustomAdminSite(admin.AdminSite):
            pass
        custom_site = CustomAdminSite()
        custom_site.register(Song, MyAdmin)
        try:
            errors = checks.run_checks()
            expected = ['error!']
            self.assertEqual(errors, expected)
        finally:
            # Clean up registration and accumulated check errors.
            custom_site.unregister(Song)
            admin.sites.system_check_errors = []
    def test_readonly_and_editable(self):
        # A field that is both readonly and in list_editable must trigger
        # admin.E125 (not editable through the admin).
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ["original_release"]
            list_display = ["pk", "original_release"]
            list_editable = ["original_release"]
            fieldsets = [
                (None, {
                    "fields": ["title", "original_release"],
                }),
            ]
        errors = SongAdmin.check(model=Song)
        expected = [
            checks.Error(
                ("The value of 'list_editable[0]' refers to 'original_release', "
                 "which is not editable through the admin."),
                hint=None,
                obj=SongAdmin,
                id='admin.E125',
            )
        ]
        self.assertEqual(errors, expected)
    def test_editable(self):
        # list_editable referring to an ordinary editable field is valid.
        class SongAdmin(admin.ModelAdmin):
            list_display = ["pk", "title"]
            list_editable = ["title"]
            fieldsets = [
                (None, {
                    "fields": ["title", "original_release"],
                }),
            ]
        errors = SongAdmin.check(model=Song)
        self.assertEqual(errors, [])
    def test_custom_modelforms_with_fields_fieldsets(self):
        """
        # Regression test for #8027: custom ModelForms with fields/fieldsets
        """
        # A custom form combined with an explicit fields list must validate.
        errors = ValidFields.check(model=Song)
        self.assertEqual(errors, [])
    def test_custom_get_form_with_fieldsets(self):
        """
        Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
        is overridden.
        Refs #19445.
        """
        errors = ValidFormFieldsets.check(model=Song)
        self.assertEqual(errors, [])
    def test_fieldsets_fields_non_tuple(self):
        """
        Tests for a tuple/list within fieldsets[1]['fields'].
        """
        # A bare string in 'fields' must be rejected with admin.E008.
        class NotATupleAdmin(admin.ModelAdmin):
            list_display = ["pk", "title"]
            list_editable = ["title"]
            fieldsets = [
                (None, {
                    "fields": "title"  # not a tuple
                }),
            ]
        errors = NotATupleAdmin.check(model=Song)
        expected = [
            checks.Error(
                "The value of 'fieldsets[1]['fields']' must be a list or tuple.",
                hint=None,
                obj=NotATupleAdmin,
                id='admin.E008',
            )
        ]
        self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
    """
    A non-list/tuple 'fields' value in the second fieldset also raises
    admin.E008.
    """
    class NotATupleAdmin(admin.ModelAdmin):
        fieldsets = [
            (None, {
                "fields": ("title",)
            }),
            ('foo', {
                "fields": "author"  # not a tuple
            }),
        ]
    errors = NotATupleAdmin.check(model=Song)
    expected = [
        checks.Error(
            "The value of 'fieldsets[1]['fields']' must be a list or tuple.",
            hint=None,
            obj=NotATupleAdmin,
            id='admin.E008',
        )
    ]
    self.assertEqual(errors, expected)
def test_exclude_values(self):
    """
    Tests for basic system checks of 'exclude' option values (#12689)
    """
    class ExcludedFields1(admin.ModelAdmin):
        exclude = 'foo'  # must be a list/tuple, not a string
    errors = ExcludedFields1.check(model=Book)
    expected = [
        checks.Error(
            "The value of 'exclude' must be a list or tuple.",
            hint=None,
            obj=ExcludedFields1,
            id='admin.E014',
        )
    ]
    self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
    """Duplicate entries in 'exclude' raise admin.E015."""
    class ExcludedFields2(admin.ModelAdmin):
        exclude = ('name', 'name')
    errors = ExcludedFields2.check(model=Book)
    expected = [
        checks.Error(
            "The value of 'exclude' contains duplicate field(s).",
            hint=None,
            obj=ExcludedFields2,
            id='admin.E015',
        )
    ]
    self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
    """A malformed 'exclude' on an inline is reported against the inline class."""
    class ExcludedFieldsInline(admin.TabularInline):
        model = Song
        exclude = 'foo'  # must be a list/tuple, not a string
    class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
        model = Album
        inlines = [ExcludedFieldsInline]
    errors = ExcludedFieldsAlbumAdmin.check(model=Album)
    expected = [
        checks.Error(
            "The value of 'exclude' must be a list or tuple.",
            hint=None,
            obj=ExcludedFieldsInline,
            id='admin.E014',
        )
    ]
    self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
    """
    Regression test for #9932 - exclude in InlineModelAdmin should not
    contain the ForeignKey field used in ModelAdmin.model
    """
    class SongInline(admin.StackedInline):
        model = Song
        exclude = ['album']  # 'album' is the FK back to the parent Album
    class AlbumAdmin(admin.ModelAdmin):
        model = Album
        inlines = [SongInline]
    errors = AlbumAdmin.check(model=Album)
    expected = [
        checks.Error(
            ("Cannot exclude the field 'album', because it is the foreign key "
             "to the parent model 'admin_checks.Album'."),
            hint=None,
            obj=SongInline,
            id='admin.E201',
        )
    ]
    self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
    """
    Regression test for #22034 - check that generic inlines don't look for
    normal ForeignKey relations.
    """
    class InfluenceInline(GenericStackedInline):
        model = Influence
    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
    """
    Ensure that a model without a GenericForeignKey raises problems if it's
    included in a GenericInlineModelAdmin definition.
    """
    class BookInline(GenericStackedInline):
        model = Book  # Book has no GenericForeignKey
    class SongAdmin(admin.ModelAdmin):
        inlines = [BookInline]
    errors = SongAdmin.check(model=Song)
    expected = [
        checks.Error(
            "'admin_checks.Book' has no GenericForeignKey.",
            hint=None,
            obj=BookInline,
            id='admin.E301',
        )
    ]
    self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
    "A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field."
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_field = 'nonexistent'
    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]
    errors = SongAdmin.check(model=Song)
    expected = [
        checks.Error(
            "'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
            hint=None,
            obj=InfluenceInline,
            id='admin.E302',
        )
    ]
    self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
    "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field."
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_fk_field = 'nonexistent'
    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]
    errors = SongAdmin.check(model=Song)
    expected = [
        checks.Error(
            "'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
            hint=None,
            obj=InfluenceInline,
            id='admin.E303',
        )
    ]
    self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
    "A GenericInlineModelAdmin raises problems if the ct_field points to a field that isn't part of a GenericForeignKey"
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_field = 'name'  # exists, but is not a GFK content-type field
    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]
    errors = SongAdmin.check(model=Song)
    expected = [
        checks.Error(
            "'admin_checks.Influence' has no GenericForeignKey using content type field 'name' and object ID field 'object_id'.",
            hint=None,
            obj=InfluenceInline,
            id='admin.E304',
        )
    ]
    self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
    "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a field that isn't part of a GenericForeignKey"
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_fk_field = 'name'  # exists, but is not a GFK object-id field
    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]
    errors = SongAdmin.check(model=Song)
    expected = [
        checks.Error(
            "'admin_checks.Influence' has no GenericForeignKey using content type field 'content_type' and object ID field 'name'.",
            hint=None,
            obj=InfluenceInline,
            id='admin.E304',
        )
    ]
    self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
    """
    Regression test for #15669 - Include app label in admin system check messages
    """
    class RawIdNonexistingAdmin(admin.ModelAdmin):
        raw_id_fields = ('nonexisting',)
    errors = RawIdNonexistingAdmin.check(model=Album)
    expected = [
        checks.Error(
            ("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
             "not an attribute of 'admin_checks.Album'."),
            hint=None,
            obj=RawIdNonexistingAdmin,
            id='admin.E002',
        )
    ]
    self.assertEqual(errors, expected)
def test_fk_exclusion(self):
    """
    Regression test for #11709 - when testing for fk excluding (when exclude is
    given) make sure fk_name is honored or things blow up when there is more
    than one fk to the parent model.
    """
    class TwoAlbumFKAndAnEInline(admin.TabularInline):
        model = TwoAlbumFKAndAnE
        exclude = ("e",)
        fk_name = "album1"  # disambiguates between the two Album FKs
    class MyAdmin(admin.ModelAdmin):
        inlines = [TwoAlbumFKAndAnEInline]
    errors = MyAdmin.check(model=Album)
    self.assertEqual(errors, [])
def test_inline_self_check(self):
    """An inline with two FKs to the parent and no fk_name raises admin.E202."""
    class TwoAlbumFKAndAnEInline(admin.TabularInline):
        model = TwoAlbumFKAndAnE
    class MyAdmin(admin.ModelAdmin):
        inlines = [TwoAlbumFKAndAnEInline]
    errors = MyAdmin.check(model=Album)
    expected = [
        checks.Error(
            "'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
            hint=None,
            obj=TwoAlbumFKAndAnEInline,
            id='admin.E202',
        )
    ]
    self.assertEqual(errors, expected)
def test_inline_with_specified(self):
    """Specifying fk_name resolves the two-FK ambiguity from the previous test."""
    class TwoAlbumFKAndAnEInline(admin.TabularInline):
        model = TwoAlbumFKAndAnE
        fk_name = "album1"
    class MyAdmin(admin.ModelAdmin):
        inlines = [TwoAlbumFKAndAnEInline]
    errors = MyAdmin.check(model=Album)
    self.assertEqual(errors, [])
def test_readonly(self):
    """A model field name is a valid readonly_fields entry."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("title",)
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_readonly_on_method(self):
    """A plain callable is a valid readonly_fields entry."""
    def my_function(obj):
        pass
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = (my_function,)
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
    """A ModelAdmin method name is a valid readonly_fields entry."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("readonly_method_on_modeladmin",)
        def readonly_method_on_modeladmin(self, obj):
            pass
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
    """A method defined on the model itself is a valid readonly_fields entry."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("readonly_method_on_model",)
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_nonexistent_field(self):
    """An unresolvable readonly_fields entry raises admin.E035."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("title", "nonexistent")
    errors = SongAdmin.check(model=Song)
    expected = [
        checks.Error(
            ("The value of 'readonly_fields[1]' is not a callable, an attribute "
             "of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
            hint=None,
            obj=SongAdmin,
            id='admin.E035',
        )
    ]
    self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
    """The same admin.E035 check runs for inlines, reported against the inline."""
    class CityInline(admin.TabularInline):
        model = City
        readonly_fields = ['i_dont_exist']  # Missing attribute
    errors = CityInline.check(State)
    expected = [
        checks.Error(
            ("The value of 'readonly_fields[0]' is not a callable, an attribute "
             "of 'CityInline', or an attribute of 'admin_checks.City'."),
            hint=None,
            obj=CityInline,
            id='admin.E035',
        )
    ]
    self.assertEqual(errors, expected)
def test_extra(self):
    """Extra (non-field) methods on a ModelAdmin don't trip the checks."""
    class SongAdmin(admin.ModelAdmin):
        def awesome_song(self, instance):
            if instance.title == "Born to Run":
                return "Best Ever!"
            return "Status unknown."
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_readonly_lambda(self):
    """A lambda is a valid (callable) readonly_fields entry."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = (lambda obj: "test",)
    errors = SongAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
    """
    Regression test for #12203/#12237 - Fail more gracefully when a M2M field
    that specifies the 'through' option is included in the 'fields' or the
    'fieldsets' ModelAdmin options.
    """
    class BookAdmin(admin.ModelAdmin):
        fields = ['authors']  # M2M with an explicit 'through' model
    errors = BookAdmin.check(model=Book)
    expected = [
        checks.Error(
            ("The value of 'fields' cannot include the ManyToManyField 'authors', "
             "because that field manually specifies a relationship model."),
            hint=None,
            obj=BookAdmin,
            id='admin.E013',
        )
    ]
    self.assertEqual(errors, expected)
def test_cannot_include_through(self):
    """The 'through'-M2M restriction also applies inside fieldsets."""
    class FieldsetBookAdmin(admin.ModelAdmin):
        fieldsets = (
            ('Header 1', {'fields': ('name',)}),
            ('Header 2', {'fields': ('authors',)}),
        )
    errors = FieldsetBookAdmin.check(model=Book)
    expected = [
        checks.Error(
            ("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
             "'authors', because that field manually specifies a relationship model."),
            hint=None,
            obj=FieldsetBookAdmin,
            id='admin.E013',
        )
    ]
    self.assertEqual(errors, expected)
def test_nested_fields(self):
    """'fields' may contain nested tuples (rendered on one line)."""
    class NestedFieldsAdmin(admin.ModelAdmin):
        fields = ('price', ('name', 'subtitle'))
    errors = NestedFieldsAdmin.check(model=Book)
    self.assertEqual(errors, [])
def test_nested_fieldsets(self):
    """Nested tuples are equally valid inside a fieldset's 'fields'."""
    class NestedFieldsetAdmin(admin.ModelAdmin):
        fieldsets = (
            ('Main', {'fields': ('price', ('name', 'subtitle'))}),
        )
    errors = NestedFieldsetAdmin.check(model=Book)
    self.assertEqual(errors, [])
def test_explicit_through_override(self):
    """
    Regression test for #12209 -- If the explicitly provided through model
    is specified as a string, the admin should still be able use
    Model.m2m_field.through
    """
    class AuthorsInline(admin.TabularInline):
        model = Book.authors.through
    class BookAdmin(admin.ModelAdmin):
        inlines = [AuthorsInline]
    errors = BookAdmin.check(model=Book)
    self.assertEqual(errors, [])
def test_non_model_fields(self):
    """
    Regression for ensuring ModelAdmin.fields can contain non-model fields
    that broke with r11737
    """
    class SongForm(forms.ModelForm):
        extra_data = forms.CharField()  # form-only field, not on the model
    class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
        form = SongForm
        fields = ['title', 'extra_data']
    errors = FieldsOnFormOnlyAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_non_model_first_field(self):
    """
    Regression for ensuring ModelAdmin.field can handle first elem being a
    non-model field (test fix for UnboundLocalError introduced with r16225).
    """
    class SongForm(forms.ModelForm):
        extra_data = forms.CharField()
        class Meta:
            model = Song
            fields = '__all__'
    class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
        form = SongForm
        fields = ['extra_data', 'title']  # non-model field listed first
    errors = FieldsOnFormOnlyAdmin.check(model=Song)
    self.assertEqual(errors, [])
def test_check_sublists_for_duplicates(self):
    """Duplicates across nested 'fields' sublists raise admin.E006."""
    class MyModelAdmin(admin.ModelAdmin):
        fields = ['state', ['state']]
    errors = MyModelAdmin.check(model=Song)
    expected = [
        checks.Error(
            "The value of 'fields' contains duplicate field(s).",
            hint=None,
            obj=MyModelAdmin,
            id='admin.E006'
        )
    ]
    self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
    """Duplicates across nested fieldset sublists raise admin.E012."""
    class MyModelAdmin(admin.ModelAdmin):
        fieldsets = [
            (None, {
                'fields': ['title', 'album', ('title', 'album')]
            }),
        ]
    errors = MyModelAdmin.check(model=Song)
    expected = [
        checks.Error(
            "There are duplicate field(s) in 'fieldsets[0][1]'.",
            hint=None,
            obj=MyModelAdmin,
            id='admin.E012'
        )
    ]
    self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
    """
    Ensure list_filter can access reverse fields even when the app registry
    is not ready; refs #24146.
    """
    class BookAdminWithListFilter(admin.ModelAdmin):
        list_filter = ['authorsbooks__featured']
    # Temporarily pretending apps are not ready yet. This issue can happen
    # if the value of 'list_filter' refers to a 'through__field'.
    Book._meta.apps.ready = False
    try:
        errors = BookAdminWithListFilter.check(model=Book)
        self.assertEqual(errors, [])
    finally:
        # Always restore the registry flag for subsequent tests.
        Book._meta.apps.ready = True
| bsd-3-clause |
cdrttn/samba-regedit | python/samba/tests/samba_tool/base.py | 36 | 4702 | # Unix SMB/CIFS implementation.
# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This provides a wrapper around the cmd interface so that tests can
# easily be built on top of it and have minimal code to run basic tests
# of the commands. A list of the environmental variables can be found in
# ~/selftest/selftest.pl
#
# These can all be accesses via os.environ["VARIBLENAME"] when needed
import random
import string
from samba.auth import system_session
from samba.samdb import SamDB
from cStringIO import StringIO
from samba.netcmd.main import cmd_sambatool
import samba.tests
class SambaToolCmdTest(samba.tests.TestCaseInTempDir):
    """Base class for `samba-tool` command tests.

    Wraps the netcmd command machinery so subclasses can run commands,
    capture their stdout/stderr, and make assertions with minimal
    boilerplate.
    """

    def getSamDB(self, *argv):
        """a convenience function to get a samdb instance so that we can query it"""

        # We build a fake command to get the options created the same
        # way the command classes do it. It would be better if the command
        # classes had a way to more cleanly do this, but this lets us write
        # tests for now
        cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"]
        parser, optiongroups = cmd._create_parser("user")
        opts, args = parser.parse_args(list(argv))
        # Filter out options from option groups
        args = args[1:]
        kwargs = dict(opts.__dict__)
        for option_group in parser.option_groups:
            for option in option_group.option_list:
                if option.dest is not None:
                    del kwargs[option.dest]
        kwargs.update(optiongroups)
        # H is the LDB URL; sambaopts/credopts supply smb.conf + credentials.
        H = kwargs.get("H", None)
        sambaopts = kwargs.get("sambaopts", None)
        credopts = kwargs.get("credopts", None)
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)
        return samdb

    def runcmd(self, name, *args):
        """run a single level command"""
        cmd = cmd_sambatool.subcommands[name]
        cmd.outf = StringIO()
        cmd.errf = StringIO()
        result = cmd._run(name, *args)
        return (result, cmd.outf.getvalue(), cmd.errf.getvalue())

    def runsubcmd(self, name, sub, *args):
        """run a command with sub commands"""
        # The reason we need this function separate from runcmd is
        # that the .outf StringIO assignment is overriden if we use
        # runcmd, so we can't capture stdout and stderr
        cmd = cmd_sambatool.subcommands[name].subcommands[sub]
        cmd.outf = StringIO()
        cmd.errf = StringIO()
        result = cmd._run(name, *args)
        return (result, cmd.outf.getvalue(), cmd.errf.getvalue())

    def assertCmdSuccess(self, val, msg=""):
        # Commands return None on success.
        self.assertIsNone(val, msg)

    def assertCmdFail(self, val, msg=""):
        # Any non-None return value signals failure.
        self.assertIsNotNone(val, msg)

    def assertMatch(self, base, string, msg=""):
        # NOTE(review): substring containment, not a regex match.
        self.assertTrue(string in base, msg)

    def randomName(self, count=8):
        """Create a random name, cap letters and numbers, and always starting with a letter"""
        name = random.choice(string.ascii_uppercase)
        name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1))
        return name

    def randomPass(self, count=16):
        # Guarantees at least one uppercase letter, one digit and one
        # lowercase letter, presumably to satisfy password complexity
        # requirements.
        name = random.choice(string.ascii_uppercase)
        name += random.choice(string.digits)
        name += random.choice(string.ascii_lowercase)
        name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3))
        return name

    def randomXid(self):
        # pick some hopefully unused, high UID/GID range to avoid interference
        # from the system the test runs on
        xid = random.randint(4711000, 4799000)
        return xid

    def assertWithin(self, val1, val2, delta, msg=""):
        """Assert that val1 is within delta of val2, useful for time computations"""
        self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
| gpl-3.0 |
tsacha/rss2email | rss2email/version.py | 9 | 1599 | # Copyright (C) 2013 W. Trevor King <wking@tremily.us>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"""Calculate version numbers for this package and its dependencies
This makes it easier for users to submit useful bug reports.
"""
import importlib as _importlib
import sys as _sys
from . import __version__
def get_rss2email_version(package):
    """Return rss2email's own version string (*package* is ignored)."""
    return __version__
def get_python_version(package):
    """Return the running interpreter's version string (*package* is ignored)."""
    return _sys.version
def get_python_package_version(package):
    """Return *package*'s ``__version__`` string.

    Returns ``None`` when the package cannot be imported at all, and
    ``'unknown'`` when it imports fine but exposes no ``__version__``.
    """
    try:
        module = _importlib.import_module(package)
    except ImportError:
        # The exception itself carries no extra information we report,
        # so don't bind it.
        return None
    return getattr(module, '__version__', 'unknown')
def get_versions(packages=None):
    """Yield ``(package, version)`` pairs for *packages*.

    Falls back to the default report set when *packages* is empty or
    ``None``.  Packages with a dedicated ``get_<name>_version`` helper in
    this module use it; everything else goes through the generic
    ``get_python_package_version``.
    """
    default = ['rss2email', 'python', 'feedparser', 'html2text']
    for pkg in (packages or default):
        getter = globals().get(
            'get_{}_version'.format(pkg),
            get_python_package_version)
        yield (pkg, getter(package=pkg))
| gpl-2.0 |
jeffzheng1/tensorflow | tensorflow/python/tools/optimize_for_inference.py | 22 | 3970 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes parts of a graph that are only needed for training.
There are several common transformations that can be applied to GraphDefs
created to train a model, that help reduce the amount of computation needed when
the network is used only for inference. These include:
- Removing training-only operations like checkpoint saving.
- Stripping out parts of the graph that are never reached.
- Removing debug operations like CheckNumerics.
- Folding batch normalization ops into the pre-calculated weights.
- Fusing common operations into unified versions.
This script takes either a frozen binary GraphDef file (where the weight
variables have been converted into constants by the freeze_graph script), or a
text GraphDef proto file (the weight variables are stored in a separate
checkpoint file), and outputs a new GraphDef with the optimizations applied.
If the input graph is a text graph file, make sure to include the node that
restores the variable weights in output_names. That node is usually named
"restore_all".
An example of command-line usage is:
bazel build tensorflow/python/tools:optimize_for_inference && \
bazel-bin/tensorflow/python/tools/optimize_for_inference \
--input=frozen_inception_graph.pb \
--output=optimized_inception_graph.pb \
--frozen_graph=True \
--input_names=Mul \
--output_names=softmax
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.tools import optimize_for_inference_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""")
flags.DEFINE_string("output", "", """File to save the output graph to.""")
flags.DEFINE_string("input_names", "", """Input node names, comma separated.""")
flags.DEFINE_string("output_names", "",
"""Output node names, comma separated.""")
flags.DEFINE_boolean("frozen_graph", True,
"""If true, the input graph is a binary frozen GraphDef
file; if false, it is a text GraphDef proto file.""")
flags.DEFINE_integer("placeholder_type_enum", tf.float32.as_datatype_enum,
"""The AttrValue enum to use for placeholders.""")
def main(unused_args):
    """Load FLAGS.input, optimize it for inference, write FLAGS.output.

    Returns 0 on success, -1 when the input file does not exist.
    """
    if not tf.gfile.Exists(FLAGS.input):
        print("Input graph file '" + FLAGS.input + "' does not exist!")
        return -1

    input_graph_def = tf.GraphDef()
    # Read in binary mode: a frozen GraphDef is a serialized protobuf, and
    # text-mode reads can corrupt it (and break .decode() under Python 3).
    with tf.gfile.Open(FLAGS.input, "rb") as f:
        data = f.read()
    if FLAGS.frozen_graph:
        input_graph_def.ParseFromString(data)
    else:
        # Text-format proto: decode the raw bytes before merging.
        text_format.Merge(data.decode("utf-8"), input_graph_def)

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        FLAGS.input_names.split(","),
        FLAGS.output_names.split(","), FLAGS.placeholder_type_enum)

    if FLAGS.frozen_graph:
        # Serialized protobuf output must likewise be written in binary
        # mode, and the handle closed so the bytes are flushed.
        with tf.gfile.FastGFile(FLAGS.output, "wb") as f:
            f.write(output_graph_def.SerializeToString())
    else:
        tf.train.write_graph(output_graph_def,
                             os.path.dirname(FLAGS.output),
                             os.path.basename(FLAGS.output))
    return 0
if __name__ == "__main__":
    # tf.app.run() parses the flags defined above and then calls main().
    tf.app.run()
misnyo/searx | searx/engines/searx_engine.py | 4 | 1264 | """
Searx (all)
@website https://github.com/asciimoo/searx
@provide-api yes (https://asciimoo.github.io/searx/dev/search_api.html)
@using-api yes
@results JSON
@stable yes (using api)
@parse url, title, content
"""
from json import loads
from searx.engines import categories as searx_categories
categories = searx_categories.keys()
# search-url
instance_urls = []
instance_index = 0
# do search-request
def request(query, params):
    """Fill *params* with a POST request against one of the configured
    searx instances, rotating through them round-robin."""
    global instance_index

    # Pick the next instance in round-robin order.
    target = instance_urls[instance_index % len(instance_urls)]
    instance_index += 1

    params['url'] = target
    params['method'] = 'POST'
    params['data'] = {
        'q': query,
        'pageno': params['pageno'],
        'language': params['language'],
        'time_range': params['time_range'],
        'category': params['category'],
        'format': 'json',
    }
    return params
# get response from search-request
def response(resp):
    """Flatten a searx JSON reply into a single result list.

    Answers and infoboxes are appended as-is; suggestions and the total
    hit count are wrapped in marker dicts so the caller can tell them
    apart from ordinary results.
    """
    data = loads(resp.text)
    results = data['results']
    for section in ('answers', 'infoboxes'):
        results.extend(data[section])
    results.extend({'suggestion': entry} for entry in data['suggestions'])
    results.append({'number_of_results': data['number_of_results']})
    return results
| agpl-3.0 |
tempbottle/ironpython3 | Src/StdLib/Lib/turtledemo/nim.py | 65 | 6502 | """ turtle-example-suite:
tdemo_nim.py
Play nim against the computer. The player
who takes the last stick is the winner.
Implements the model-view-controller
design pattern.
"""
import turtle
import random
import time
SCREENWIDTH = 640
SCREENHEIGHT = 480
MINSTICKS = 7
MAXSTICKS = 31
HUNIT = SCREENHEIGHT // 12
WUNIT = SCREENWIDTH // ((MAXSTICKS // 5) * 11 + (MAXSTICKS % 5) * 2)
SCOLOR = (63, 63, 31)
HCOLOR = (255, 204, 204)
COLOR = (204, 204, 255)
def randomrow():
    # Random stick count for one row, within the configured bounds.
    return random.randint(MINSTICKS, MAXSTICKS)
def computerzug(state):
    """Return the computer's move as ``(row, new_count)`` for *state*.

    Classic nim strategy: when the nim-sum (xor of the three row counts)
    is zero the position is lost, so fall back to a random legal move;
    otherwise shrink one row so the nim-sum becomes zero.
    """
    nim_sum = state[0] ^ state[1] ^ state[2]
    if not nim_sum:
        # Losing position -- no winning reduction exists.
        return randommove(state)
    for row in range(3):
        target = state[row] ^ nim_sum
        if target <= state[row]:
            # Shrinking this row to `target` zeroes the nim-sum.
            return (row, target)
def randommove(state):
    """Pick a pseudo-random legal move ``(row, new_count)``.

    While more than one stick remains anywhere, a row is never emptied
    completely (lower bound 1); otherwise taking the last stick is
    allowed (lower bound 0).
    """
    lower = 1 if max(state) > 1 else 0
    while True:
        row = random.randint(0, 2)
        if state[row] > lower:
            break
    return row, random.randint(lower, state[row] - 1)
class NimModel(object):
    """Game state and rules (the "model" in the MVC pattern)."""

    def __init__(self, game):
        self.game = game

    def setup(self):
        """Start a new game: deal three random rows and notify the view."""
        if self.game.state not in [Nim.CREATED, Nim.OVER]:
            # A game is already in progress; ignore the request.
            return
        self.sticks = [randomrow(), randomrow(), randomrow()]
        self.player = 0  # 0 = human, 1 = computer
        self.winner = None
        self.game.view.setup()
        self.game.state = Nim.RUNNING

    def move(self, row, col):
        """Shrink *row* to *col* sticks; after a human move, let the computer reply."""
        maxspalte = self.sticks[row]
        self.sticks[row] = col
        self.game.view.notify_move(row, col, maxspalte, self.player)
        if self.game_over():
            self.game.state = Nim.OVER
            self.winner = self.player
            self.game.view.notify_over()
        elif self.player == 0:
            # The computer answers immediately with its own move.
            self.player = 1
            row, col = computerzug(self.sticks)
            self.move(row, col)
            self.player = 0

    def game_over(self):
        # The game ends when every row is empty.
        return self.sticks == [0, 0, 0]

    def notify_move(self, row, col):
        """Controller entry point: apply a requested move only if it is legal."""
        if self.sticks[row] <= col:
            return
        self.move(row, col)
class Stick(turtle.Turtle):
    """One on-screen stick: a clickable square turtle at a fixed grid slot."""

    def __init__(self, row, col, game):
        turtle.Turtle.__init__(self, visible=False)
        self.row = row
        self.col = col
        self.game = game
        x, y = self.coords(row, col)
        self.shape("square")
        self.shapesize(HUNIT/10.0, WUNIT/20.0)
        self.speed(0)
        self.pu()
        self.goto(x,y)
        self.color("white")
        self.showturtle()

    def coords(self, row, col):
        """Screen coordinates of slot (row, col); sticks are grouped in packets of 5."""
        packet, remainder = divmod(col, 5)
        x = (3 + 11 * packet + 2 * remainder) * WUNIT
        y = (2 + 3 * row) * HUNIT
        return x - SCREENWIDTH // 2 + WUNIT // 2, SCREENHEIGHT // 2 - y - HUNIT // 2

    def makemove(self, x, y):
        """Click handler: forward this stick's position to the controller."""
        if self.game.state != Nim.RUNNING:
            return
        self.game.controller.notify_move(self.row, self.col)
class NimView(object):
    """Turtle-graphics rendering of the board (the "view" in MVC)."""

    def __init__(self, game):
        self.game = game
        self.screen = game.screen
        self.model = game.model
        self.screen.colormode(255)
        self.screen.tracer(False)  # batch drawing; re-enable when done
        self.screen.bgcolor((240, 240, 255))
        self.writer = turtle.Turtle(visible=False)  # status-line writer
        self.writer.pu()
        self.writer.speed(0)
        self.sticks = {}
        # Pre-create every possible stick; unused ones stay white.
        for row in range(3):
            for col in range(MAXSTICKS):
                self.sticks[(row, col)] = Stick(row, col, game)
        self.display("... a moment please ...")
        self.screen.tracer(True)

    def display(self, msg1, msg2=None):
        """Show status line *msg1* and an optional red headline *msg2*."""
        self.screen.tracer(False)
        self.writer.clear()
        if msg2 is not None:
            self.writer.goto(0, - SCREENHEIGHT // 2 + 48)
            self.writer.pencolor("red")
            self.writer.write(msg2, align="center", font=("Courier",18,"bold"))
        self.writer.goto(0, - SCREENHEIGHT // 2 + 20)
        self.writer.pencolor("black")
        self.writer.write(msg1, align="center", font=("Courier",14,"bold"))
        self.screen.tracer(True)

    def setup(self):
        """Paint the freshly dealt rows from the model's stick counts."""
        self.screen.tracer(False)
        for row in range(3):
            for col in range(self.model.sticks[row]):
                self.sticks[(row, col)].color(SCOLOR)
        for row in range(3):
            for col in range(self.model.sticks[row], MAXSTICKS):
                self.sticks[(row, col)].color("white")
        self.display("Your turn! Click leftmost stick to remove.")
        self.screen.tracer(True)

    def notify_move(self, row, col, maxspalte, player):
        """Animate removal of sticks *col*..*maxspalte*-1 in *row*."""
        if player == 0:
            farbe = HCOLOR
            for s in range(col, maxspalte):
                self.sticks[(row, s)].color(farbe)
        else:
            self.display(" ... thinking ... ")
            time.sleep(0.5)
            self.display(" ... thinking ... aaah ...")
            farbe = COLOR
            # The computer removes its sticks one by one, right to left.
            for s in range(maxspalte-1, col-1, -1):
                time.sleep(0.2)
                self.sticks[(row, s)].color(farbe)
            self.display("Your turn! Click leftmost stick to remove.")

    def notify_over(self):
        """Announce the winner and explain how to restart or quit."""
        if self.game.model.winner == 0:
            msg2 = "Congrats. You're the winner!!!"
        else:
            msg2 = "Sorry, the computer is the winner."
        self.display("To play again press space bar. To leave press ESC.", msg2)

    def clear(self):
        # Only wipe the screen once the game has actually finished.
        if self.game.state == Nim.OVER:
            self.screen.clear()
class NimController(object):
    """Input handling (the "controller" in MVC): stick clicks and keyboard."""

    def __init__(self, game):
        self.game = game
        self.sticks = game.view.sticks
        self.BUSY = False  # guards against re-entrant click handling
        for stick in self.sticks.values():
            stick.onclick(stick.makemove)
        self.game.screen.onkey(self.game.model.setup, "space")
        self.game.screen.onkey(self.game.view.clear, "Escape")
        self.game.view.display("Press space bar to start game")
        self.game.screen.listen()

    def notify_move(self, row, col):
        """Forward a click to the model, unless a move is already in progress."""
        if self.BUSY:
            return
        self.BUSY = True
        self.game.model.notify_move(row, col)
        self.BUSY = False
class Nim(object):
    """Glue object that wires model, view and controller together."""

    # Game life-cycle states.
    CREATED = 0
    RUNNING = 1
    OVER = 2

    def __init__(self, screen):
        self.state = Nim.CREATED
        self.screen = screen
        self.model = NimModel(self)
        self.view = NimView(self)
        self.controller = NimController(self)
# Module-level screen setup (runs on import, as the turtledemo framework
# expects).
mainscreen = turtle.Screen()
mainscreen.mode("standard")
mainscreen.setup(SCREENWIDTH, SCREENHEIGHT)
def main():
    """Demo entry point: build the game and hand control to the event loop."""
    nim = Nim(mainscreen)
    # The turtledemo framework interprets this sentinel as "enter mainloop".
    return "EVENTLOOP!"
if __name__ == "__main__":
    # Stand-alone run: set up the game, then keep the window alive.
    main()
    turtle.mainloop()
aluedeke/chromedriver | py/selenium/webdriver/common/keys.py | 34 | 2581 | # copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License Version 2.0 = uthe "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http //www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Keys implementation.
"""
from __future__ import unicode_literals
class Keys(object):
    """
    Set of special keys codes.

    Each value is a Unicode Private Use Area code point (U+E000 and up)
    that the remote end of the WebDriver protocol decodes back into the
    corresponding special keyboard key.
    """

    NULL = '\ue000'
    CANCEL = '\ue001'  # ^break
    HELP = '\ue002'
    BACKSPACE = '\ue003'
    BACK_SPACE = BACKSPACE  # alias
    TAB = '\ue004'
    CLEAR = '\ue005'
    RETURN = '\ue006'
    ENTER = '\ue007'

    # Modifier keys.
    SHIFT = '\ue008'
    LEFT_SHIFT = SHIFT  # alias
    CONTROL = '\ue009'
    LEFT_CONTROL = CONTROL  # alias
    ALT = '\ue00a'
    LEFT_ALT = ALT  # alias

    PAUSE = '\ue00b'
    ESCAPE = '\ue00c'
    SPACE = '\ue00d'

    # Navigation keys.
    PAGE_UP = '\ue00e'
    PAGE_DOWN = '\ue00f'
    END = '\ue010'
    HOME = '\ue011'
    LEFT = '\ue012'
    ARROW_LEFT = LEFT  # alias
    UP = '\ue013'
    ARROW_UP = UP  # alias
    RIGHT = '\ue014'
    ARROW_RIGHT = RIGHT  # alias
    DOWN = '\ue015'
    ARROW_DOWN = DOWN  # alias
    INSERT = '\ue016'
    DELETE = '\ue017'
    SEMICOLON = '\ue018'
    EQUALS = '\ue019'

    NUMPAD0 = '\ue01a'  # number pad keys
    NUMPAD1 = '\ue01b'
    NUMPAD2 = '\ue01c'
    NUMPAD3 = '\ue01d'
    NUMPAD4 = '\ue01e'
    NUMPAD5 = '\ue01f'
    NUMPAD6 = '\ue020'
    NUMPAD7 = '\ue021'
    NUMPAD8 = '\ue022'
    NUMPAD9 = '\ue023'
    MULTIPLY = '\ue024'
    ADD = '\ue025'
    SEPARATOR = '\ue026'
    SUBTRACT = '\ue027'
    DECIMAL = '\ue028'
    DIVIDE = '\ue029'

    F1 = '\ue031'  # function keys
    F2 = '\ue032'
    F3 = '\ue033'
    F4 = '\ue034'
    F5 = '\ue035'
    F6 = '\ue036'
    F7 = '\ue037'
    F8 = '\ue038'
    F9 = '\ue039'
    F10 = '\ue03a'
    F11 = '\ue03b'
    F12 = '\ue03c'

    META = '\ue03d'
    COMMAND = '\ue03d'  # same code as META (macOS command key)
| apache-2.0 |
scharron/chardet | chardet/langhungarianmodel.py | 25 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# Maps each ISO-8859-2 byte value (0-255) to a frequency-order index:
# low numbers are the most frequent Hungarian letters; 252 = digit,
# 253 = symbol/punctuation, 254 = CR/LF, 255 = control character.
Latin2_HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Same frequency-order mapping as above, but for bytes in the
# windows-1250 encoding (special codes 252-255 as described above).
win1250HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
# Bigram language model for Hungarian: generated data, do not edit by hand.
# Entry [a*32 + b] scores how likely character-order `a` is followed by
# order `b` (0 = negative .. 3 = very likely); rows alternate in pairs.
HungarianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Language-model descriptor for Hungarian text in the ISO-8859-2 encoding.
Latin2HungarianModel = {
    'charToOrderMap': Latin2_HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,
    'keepEnglishLetter': True,
    'charsetName': "ISO-8859-2",
}
# Language-model descriptor for Hungarian text in the windows-1250 encoding.
Win1250HungarianModel = {
    'charToOrderMap': win1250HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,
    'keepEnglishLetter': True,
    'charsetName': "windows-1250",
}
| lgpl-2.1 |
lulf/qpid-dispatch | tests/router_policy_test.py | 2 | 13755 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from qpid_dispatch_internal.policy.policy_util import HostAddr, is_ipv6_enabled
from qpid_dispatch_internal.policy.policy_util import HostStruct
from qpid_dispatch_internal.policy.policy_util import PolicyError
from qpid_dispatch_internal.policy.policy_util import PolicyAppConnectionMgr
from qpid_dispatch_internal.policy.policy_local import PolicyLocal
from system_test import TestCase, main_module
class PolicyHostAddrTest(TestCase):
    """Unit tests for HostAddr/HostStruct parsing and range matching."""

    def expect_deny(self, badhostname, msg):
        """Assert that constructing a HostStruct from badhostname raises
        PolicyError; msg identifies the expected failure on assert."""
        denied = False
        try:
            # Fixed: the constructed object was bound to an unused variable.
            HostStruct(badhostname)
        except PolicyError:
            denied = True
        self.assertTrue(denied, msg)

    def check_hostaddr_match(self, tHostAddr, tString, expectOk=True):
        """Check that tString matches (or not, per expectOk) tHostAddr via
        both the string path and the pre-parsed HostStruct path."""
        ha = HostStruct(tString)
        if expectOk:
            self.assertTrue(tHostAddr.match_str(tString))
            self.assertTrue(tHostAddr.match_bin(ha))
        else:
            self.assertFalse(tHostAddr.match_str(tString))
            self.assertFalse(tHostAddr.match_bin(ha))

    def test_policy_hostaddr_ipv4(self):
        # Create simple host and range
        aaa = HostAddr("192.168.1.1")
        bbb = HostAddr("1.1.1.1,1.1.1.255")
        # Verify host and range
        self.check_hostaddr_match(aaa, "192.168.1.1")
        self.check_hostaddr_match(aaa, "1.1.1.1", False)
        self.check_hostaddr_match(aaa, "192.168.1.2", False)
        self.check_hostaddr_match(bbb, "1.1.1.1")
        self.check_hostaddr_match(bbb, "1.1.1.254")
        self.check_hostaddr_match(bbb, "1.1.1.0", False)
        self.check_hostaddr_match(bbb, "1.1.2.0", False)

    def test_policy_hostaddr_ipv6(self):
        if not is_ipv6_enabled():
            self.skipTest("System IPv6 support is not available")
        # Create simple host and range
        aaa = HostAddr("::1")
        bbb = HostAddr("::1,::ffff")
        ccc = HostAddr("ffff::0,ffff:ffff::0")
        # Verify host and range
        self.check_hostaddr_match(aaa, "::1")
        self.check_hostaddr_match(aaa, "::2", False)
        self.check_hostaddr_match(aaa, "ffff:ffff::0", False)
        self.check_hostaddr_match(bbb, "::1")
        self.check_hostaddr_match(bbb, "::fffe")
        self.check_hostaddr_match(bbb, "::1:0", False)
        self.check_hostaddr_match(bbb, "ffff::0", False)
        self.check_hostaddr_match(ccc, "ffff::1")
        self.check_hostaddr_match(ccc, "ffff:fffe:ffff:ffff::ffff")
        self.check_hostaddr_match(ccc, "ffff:ffff::1", False)
        self.check_hostaddr_match(ccc, "ffff:ffff:ffff:ffff::ffff", False)

    def test_policy_hostaddr_ipv4_wildcard(self):
        aaa = HostAddr("*")
        self.check_hostaddr_match(aaa, "0.0.0.0")
        self.check_hostaddr_match(aaa, "127.0.0.1")
        self.check_hostaddr_match(aaa, "255.254.253.252")

    def test_policy_hostaddr_ipv6_wildcard(self):
        if not is_ipv6_enabled():
            self.skipTest("System IPv6 support is not available")
        aaa = HostAddr("*")
        self.check_hostaddr_match(aaa, "::0")
        self.check_hostaddr_match(aaa, "::1")
        self.check_hostaddr_match(aaa, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")

    def test_policy_malformed_hostaddr_ipv4(self):
        self.expect_deny("0.0.0.0.0", "Name or service not known")
        self.expect_deny("1.1.1.1,2.2.2.2,3.3.3.3", "arg count")
        self.expect_deny("9.9.9.9,8.8.8.8", "a > b")

    def test_policy_malformed_hostaddr_ipv6(self):
        if not is_ipv6_enabled():
            self.skipTest("System IPv6 support is not available")
        self.expect_deny("1::2::3", "Name or service not known")
        self.expect_deny("::1,::2,::3", "arg count")
        self.expect_deny("0:ff:0,0:fe:ffff:ffff::0", "a > b")
class QpidDispatch(object):
    """Stand-in for the dispatch C extension used by the policy engine."""

    def qd_dispatch_policy_c_counts_alloc(self):
        """Pretend to allocate a C-side counts object; return a dummy handle."""
        return 100

    def qd_dispatch_policy_c_counts_refresh(self, cstats, entitymap):
        """No-op refresh of the C-side statistics."""
        return None
class MockAgent(object):
    """Minimal agent double exposing only what the policy code touches."""

    def __init__(self):
        # The policy code reaches through .qd into the (mocked) C layer.
        self.qd = QpidDispatch()

    def add_implementation(self, entity, cfg_obj_name):
        """Accept and ignore an entity registration."""
        return None
class MockPolicyManager(object):
    """Policy-manager double that logs to stdout and hands out a MockAgent."""

    def __init__(self):
        self.agent = MockAgent()

    def _emit(self, level, text):
        # All log levels share one format; output matches the real manager's.
        print("%s: %s" % (level, text))

    def log_debug(self, text):
        self._emit("DEBUG", text)

    def log_info(self, text):
        self._emit("INFO", text)

    def log_trace(self, text):
        self._emit("TRACE", text)

    def log_error(self, text):
        self._emit("ERROR", text)

    def get_agent(self):
        return self.agent
class PolicyFile(TestCase):
    """Lookup tests against the built-in ruleset (test_load_config).

    Replaced assertTrue(x == y) with assertEqual for useful diagnostics.
    """

    # Loaded once at class-definition time and shared by all tests.
    manager = MockPolicyManager()
    policy = PolicyLocal(manager)
    policy.test_load_config()

    def test_policy1_test_zeke_ok(self):
        p1 = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 1)
        self.assertEqual(p1, 'test')
        upolicy = {}
        self.assertTrue(
            PolicyFile.policy.lookup_settings('photoserver', p1, upolicy)
        )
        self.assertEqual(upolicy['maxFrameSize'], 444444)
        self.assertEqual(upolicy['maxMessageSize'], 444444)
        self.assertEqual(upolicy['maxSessionWindow'], 444444)
        self.assertEqual(upolicy['maxSessions'], 4)
        self.assertEqual(upolicy['maxSenders'], 44)
        self.assertEqual(upolicy['maxReceivers'], 44)
        self.assertTrue(upolicy['allowAnonymousSender'])
        self.assertTrue(upolicy['allowDynamicSource'])
        self.assertEqual(upolicy['targets'], 'private')
        self.assertEqual(upolicy['sources'], 'private')

    def test_policy1_test_zeke_bad_IP(self):
        # Hosts outside zeke's allowed ranges resolve to no user group.
        self.assertEqual(
            PolicyFile.policy.lookup_user('zeke', '10.18.0.1', 'photoserver', "connid", 2), '')
        self.assertEqual(
            PolicyFile.policy.lookup_user('zeke', '72.135.2.9', 'photoserver', "connid", 3), '')
        self.assertEqual(
            PolicyFile.policy.lookup_user('zeke', '127.0.0.1', 'photoserver', "connid", 4), '')

    def test_policy1_test_zeke_bad_app(self):
        self.assertEqual(
            PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5), '')

    def test_policy1_test_users_same_permissions(self):
        zname = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 6)
        yname = PolicyFile.policy.lookup_user('ynot', '10.48.255.254', 'photoserver', '192.168.100.5:33334', 7)
        self.assertEqual(zname, yname)

    def test_policy1_lookup_unknown_application(self):
        upolicy = {}
        self.assertFalse(
            PolicyFile.policy.lookup_settings('unknown', 'doesntmatter', upolicy)
        )

    def test_policy1_lookup_unknown_usergroup(self):
        upolicy = {}
        self.assertFalse(
            PolicyFile.policy.lookup_settings('photoserver', 'unknown', upolicy)
        )
class PolicyFileApplicationFallback(TestCase):
    """Tests for routing unknown vhosts to the configured default vhost.

    Replaced assertTrue(x == y) with assertEqual for useful diagnostics.
    """

    manager = MockPolicyManager()
    policy = PolicyLocal(manager)
    policy.test_load_config()

    def test_bad_app_fallback(self):
        # Show that with no fallback the user cannot connect
        self.assertEqual(
            self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5), '')
        # Enable the fallback defaultVhost and show the same user can now connect
        self.policy.set_default_vhost('photoserver')
        settingsname = self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5)
        self.assertEqual(settingsname, 'test')
        # Show that the fallback settings are returned
        upolicy = {}
        self.assertTrue(
            self.policy.lookup_settings('phony*app*name', settingsname, upolicy)
        )
        self.assertEqual(upolicy['maxFrameSize'], 444444)
        self.assertEqual(upolicy['maxMessageSize'], 444444)
        self.assertEqual(upolicy['maxSessionWindow'], 444444)
        self.assertEqual(upolicy['maxSessions'], 4)
        self.assertEqual(upolicy['maxSenders'], 44)
        self.assertEqual(upolicy['maxReceivers'], 44)
        self.assertTrue(upolicy['allowAnonymousSender'])
        self.assertTrue(upolicy['allowDynamicSource'])
        self.assertEqual(upolicy['targets'], 'private')
        self.assertEqual(upolicy['sources'], 'private')
        # Disable fallback and show failure again
        self.policy.set_default_vhost('')
        self.assertEqual(
            self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5), '')
class PolicyAppConnectionMgrTests(TestCase):
    """Tests for PolicyAppConnectionMgr(total, per-user, per-host) limits.

    Replaced assertTrue(x == y) / assertTrue(s in t) with assertEqual /
    assertIn so failures report the actual values.
    """

    def test_policy_app_conn_mgr_fail_by_total(self):
        stats = PolicyAppConnectionMgr(1, 2, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(len(diags), 1)
        self.assertIn('application connection limit', diags[0])

    def test_policy_app_conn_mgr_fail_by_user(self):
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(len(diags), 1)
        self.assertIn('per user', diags[0])

    def test_policy_app_conn_mgr_fail_by_hosts(self):
        stats = PolicyAppConnectionMgr(3, 2, 1)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(len(diags), 1)
        self.assertIn('per host', diags[0])

    def test_policy_app_conn_mgr_fail_by_user_hosts(self):
        stats = PolicyAppConnectionMgr(3, 1, 1)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(len(diags), 2)
        # Both the per-user and the per-host limit must be reported.
        self.assertTrue('per user' in diags[0] or 'per user' in diags[1])
        self.assertTrue('per host' in diags[0] or 'per host' in diags[1])

    def test_policy_app_conn_mgr_update(self):
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(len(diags), 1)
        self.assertIn('per user', diags[0])
        diags = []
        # Raising the per-user limit lets the second connection through.
        stats.update(3, 2, 2)
        self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))

    def test_policy_app_conn_mgr_disconnect(self):
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(len(diags), 1)
        self.assertIn('per user', diags[0])
        diags = []
        # Dropping the first connection frees the per-user slot.
        stats.disconnect("10.10.10.10:10000", 'chuck', '10.10.10.10')
        self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))

    def test_policy_app_conn_mgr_create_bad_settings(self):
        denied = False
        try:
            PolicyAppConnectionMgr(-3, 1, 2)
        except PolicyError:
            denied = True
        self.assertTrue(denied, "Failed to detect negative setting value.")

    def test_policy_app_conn_mgr_update_bad_settings(self):
        denied = False
        try:
            stats = PolicyAppConnectionMgr(0, 0, 0)
        except PolicyError:
            denied = True
        self.assertFalse(denied, "Should allow all zeros.")
        try:
            stats.update(0, -1, 0)
        except PolicyError:
            denied = True
        self.assertTrue(denied, "Failed to detect negative setting value.")

    def test_policy_app_conn_mgr_larger_counts(self):
        stats = PolicyAppConnectionMgr(10000, 10000, 10000)
        diags = []
        for i in range(0, 10000):
            self.assertTrue(stats.can_connect('1.1.1.1:' + str(i), 'chuck', '1.1.1.1', diags))
        self.assertEqual(len(diags), 0)
        self.assertFalse(stats.can_connect('1.1.1.1:10000', 'chuck', '1.1.1.1', diags))
        self.assertEqual(len(diags), 3)
        self.assertEqual(stats.connections_active, 10000)
        self.assertEqual(stats.connections_approved, 10000)
        self.assertEqual(stats.connections_denied, 1)
# Run through the system_test harness so its setup/teardown applies.
if __name__ == '__main__':
    unittest.main(main_module())
| apache-2.0 |
botswana-harvard/edc-sync | edc_sync/tests/models.py | 1 | 3084 | from edc_base.model_managers import HistoricalRecords
from edc_base.model_mixins.list_model_mixin import ListModelMixin
from uuid import uuid4
from django.db import models
from django.db.models.deletion import PROTECT
from edc_base.model_mixins import BaseUuidModel, BaseModel
from edc_base.utils import get_utcnow
from simple_history.models import HistoricalRecords as BadHistoricalRecords
class TestModelManager(models.Manager):
    """Manager that resolves instances by their natural key (unique f1)."""
    def get_by_natural_key(self, f1):
        return self.get(f1=f1)
class TestModel(BaseUuidModel):
    """Simple natural-keyed model used to exercise sync serialization."""

    f1 = models.CharField(max_length=10, unique=True)
    f2 = models.CharField(max_length=10, null=True)
    # BUG FIX: pass the callable, not ``uuid4()``: the call was evaluated
    # once at import time, so every instance shared the same "default".
    # NOTE(review): a UUID string is 36 chars, which exceeds max_length=10 --
    # confirm that is intended (full_clean() would reject such a value).
    f3 = models.CharField(max_length=10, default=uuid4)

    objects = TestModelManager()
    history = HistoricalRecords()

    def natural_key(self):
        return (self.f1,)
class TestModelDates(BaseUuidModel):
    """Model exercising date and datetime fields in sync payloads."""
    f1 = models.CharField(max_length=10, unique=True)
    # Optional plain date.
    f2 = models.DateField(
        blank=True,
        null=True)
    # Timestamp defaulting to "now" via the edc_base helper (a callable).
    f3 = models.DateTimeField(
        blank=True,
        default=get_utcnow)
    objects = TestModelManager()
    def natural_key(self):
        return (self.f1,)
class BadTestModel(BaseUuidModel):
    """A test model that is missing natural_key and get_by_natural_key.
    """
    # Deliberately uses the plain Manager so sync validation must reject it.
    f1 = models.CharField(max_length=10, default='f1')
    objects = models.Manager()
class AnotherBadTestModel(BaseUuidModel):
    """A test model that is missing get_by_natural_key.
    """
    # Has natural_key() but the default Manager lacks get_by_natural_key().
    f1 = models.CharField(max_length=10, default='f1')
    objects = models.Manager()
    def natural_key(self):
        return (self.f1,)
class YetAnotherBadTestModel(BaseUuidModel):
    """A test model with the wrong HistoricalManager.
    """
    f1 = models.CharField(max_length=10, default='f1')
    objects = TestModelManager()
    # simple_history's stock HistoricalRecords (imported here as
    # BadHistoricalRecords) lacks the edc_base UUID handling.
    history = BadHistoricalRecords() # missing UUID handling
    def natural_key(self):
        return (self.f1,)
class TestSyncModelNoHistoryManager(BaseUuidModel):
    """A test model without a HistoricalManager.
    """
    # No ``history`` attribute on purpose.
    f1 = models.CharField(max_length=10, default='f1')
    objects = TestModelManager()
    def natural_key(self):
        return (self.f1,)
class TestSyncModelNoUuid(BaseModel):
    """A test model that does not use the UUID primary-key base class
    (BaseModel instead of BaseUuidModel).
    """
    f1 = models.CharField(max_length=10, default='f1')
    objects = TestModelManager()
    def natural_key(self):
        return (self.f1,)
class M2m(ListModelMixin, BaseUuidModel):
    # Simple list-model target for the many-to-many test model below.
    pass
class TestModelWithFkProtected(BaseUuidModel):
    """Model with a PROTECT foreign key to TestModel."""
    f1 = models.CharField(max_length=10, unique=True)
    test_model = models.ForeignKey(TestModel, on_delete=PROTECT)
    objects = TestModelManager()
    history = HistoricalRecords()
    def natural_key(self):
        return (self.f1,)
    # Serialize the referenced TestModel before this model when syncing.
    natural_key.dependencies = ['edc_sync.test_model']
class TestModelWithM2m(BaseUuidModel):
    """Model with a many-to-many relation to M2m."""
    f1 = models.CharField(max_length=10, unique=True)
    m2m = models.ManyToManyField(M2m)
    objects = TestModelManager()
    history = HistoricalRecords()
    def natural_key(self):
        return (self.f1,)
| gpl-2.0 |
bop/hybrid | lib/python2.6/site-packages/django/contrib/gis/tests/geo3d/models.py | 222 | 2064 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class City3D(models.Model):
    """Named city located by a 3-D point."""
    name = models.CharField(max_length=30)
    point = models.PointField(dim=3)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Interstate2D(models.Model):
    """Interstate as a 2-D line string (SRID 4269)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=4269)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Interstate3D(models.Model):
    """Interstate as a 3-D line string (SRID 4269)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=4269)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class InterstateProj2D(models.Model):
    """Interstate as a 2-D line string in a projected system (SRID 32140)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class InterstateProj3D(models.Model):
    """Interstate as a 3-D line string in a projected system (SRID 32140)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Polygon2D(models.Model):
    """Named 2-D polygon (SRID 32140)."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Polygon3D(models.Model):
    """Named 3-D polygon (SRID 32140)."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(dim=3, srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
class Point2D(models.Model):
    # Bare 2-D point; used for dimension-comparison tests.
    point = models.PointField()
    objects = models.GeoManager()
class Point3D(models.Model):
    # Bare 3-D point; used for dimension-comparison tests.
    point = models.PointField(dim=3)
    objects = models.GeoManager()
class MultiPoint3D(models.Model):
    # Bare 3-D multi-point geometry.
    mpoint = models.MultiPointField(dim=3)
    objects = models.GeoManager()
| gpl-2.0 |
lianwutech/plugin_linkworld-discard- | channels/serial_rtu.py | 1 | 8808 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
串口RTU通道类
通过调用管理类对象的process_data函数实现信息的发送。
"""
import logging
import serial
from pymodbus.client.sync import ModbusSerialClient
from libs.modbus_define import *
from libs.base_channel import BaseChannel
logger = logging.getLogger('linkworld')
class SerialRtuChannel(BaseChannel):
def __init__(self, network, channel_name, channel_protocol, channel_params, channel_manager, channel_type):
self.port = channel_params.get("port", "")
self.stopbits = channel_params.get("stopbits", serial.STOPBITS_ONE)
self.parity = channel_params.get("parity", serial.PARITY_NONE)
self.bytesize = channel_params.get("bytesize", serial.EIGHTBITS)
self.baudrate = channel_params.get("baudrate", 9600)
self.timeout = channel_params.get("timeout", 2)
self.modbus_client = None
BaseChannel.__init__(self, network, channel_name, channel_protocol, channel_params, channel_manager,
channel_type)
def run(self):
self.modbus_client = ModbusSerialClient(method='rtu',
port=self.port,
baudrate=self.baudrate,
stopbits=self.stopbits,
parity=self.parity,
bytesize=self.bytesize,
timeout=self.timeout)
try:
self.modbus_client.connect()
logger.debug("连接串口成功.")
except Exception, e:
logger.error("连接串口失败,错误信息:%r." % e)
self.modbus_client = None
def isAlive(self):
return True
def send_cmd(self, device_info, device_cmd):
if self.modbus_client is None:
logger.error("modbus client is not connect.")
device_data = None
return False
if device_cmd["func_code"] == const.fc_read_coils or device_cmd["func_code"] == const.fc_read_discrete_inputs:
req_result = self.modbus_client.read_coils(device_cmd["addr"],
device_cmd["count"],
unit=int(device_info["device_addr"]))
if req_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": device_cmd["count"],
"values": req_result.bits
}
elif device_cmd["func_code"] == const.fc_write_coil:
req_result = self.modbus_client.write_coil(device_cmd["addr"],
device_cmd["value"],
unit=int(device_info["device_addr"]))
res_result = self.modbus_client.read_coils(device_cmd["addr"],
1,
unit=int(device_info["device_addr"]))
if res_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": 1,
"values": res_result.bits[0:1]
}
elif device_cmd["func_code"] == const.fc_write_coils:
req_result = self.modbus_client.write_coils(device_cmd["addr"],
device_cmd["values"],
unit=int(device_info["device_addr"]))
counter = len(device_cmd["values"])
res_result = self.modbus_client.read_coils(device_cmd["addr"],
counter,
unit=int(device_info["device_addr"]))
if res_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": counter,
"values": res_result.bits
}
elif device_cmd["func_code"] == const.fc_write_register:
req_result = self.modbus_client.write_register(device_cmd["addr"],
device_cmd["value"],
unit=int(device_info["device_addr"]))
res_result = self.modbus_client.read_holding_registers(device_cmd["addr"],
1,
unit=int(device_info["device_addr"]))
if res_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": 1,
"values": res_result.registers[0:1]
}
elif device_cmd["func_code"] == const.fc_write_registers:
result = self.modbus_client.write_registers(device_cmd["addr"],
device_cmd["values"],
unit=int(device_info["device_addr"]))
counter = len(device_cmd["values"])
res_result = self.modbus_client.read_input_registers(device_cmd["addr"],
counter,
unit=int(device_info["device_addr"]))
if res_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": counter,
"values": res_result.registers
}
elif device_cmd["func_code"] == const.fc_read_holding_registers:
res_result = self.modbus_client.read_holding_registers(device_cmd["addr"],
device_cmd["count"],
unit=int(device_info["device_addr"]))
if res_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": device_cmd["count"],
"values": res_result.registers
}
elif device_cmd["func_code"] == const.fc_read_input_registers:
res_result = self.modbus_client.read_input_registers(device_cmd["addr"],
device_cmd["count"],
unit=int(device_info["device_addr"]))
if res_result is None:
logger.error("device_cmd(%r) retun None." % device_cmd)
device_data = None
else:
device_data = {
"func_code": device_cmd["func_code"],
"addr": device_cmd["addr"],
"count": device_cmd["count"],
"values": res_result.registers
}
else:
logger.error("不支持的modbus指令:%d" % device_cmd["func_code"])
device_data = None
data_msg = device_data
return self.channel_manager.process_data(self.network_name,
self.channel_name,
self.channel_protocol,
device_info["device_id"],
data_msg)
| apache-2.0 |
jamesleesaunders/pi-hive | scripts/setup-db.py | 2 | 1428 | #!/usr/bin/python
# coding: utf-8
# Filename: setup-db.py
# Description: Sets up DB used by zigbeehub.py
# Author: James Saunders [james@saunders-family.net]
# Copyright: Copyright (C) 2017 James Saunders
# License: MIT
import sqlite3

# Set up DB connection. sqlite3.connect() creates nodes.db on first run.
conn = sqlite3.connect('nodes.db')
print("Database successfully created")

# Create Nodes table: one row per ZigBee node, uniquely identified by its
# 64-bit long address. IF NOT EXISTS makes the script safe to re-run.
conn.execute('''CREATE TABLE IF NOT EXISTS Nodes (
    Id INTEGER PRIMARY KEY AUTOINCREMENT,
    AddressLong BLOB NOT NULL,
    AddressShort BLOB,
    Name CHAR(50) DEFAULT '',
    Type CHAR(50) DEFAULT '',
    Version INTEGER,
    Manufacturer CHAR(50) DEFAULT '',
    ManufactureDate CHAR(10) DEFAULT '',
    FirstSeen DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    LastSeen DATETIME,
    MessagesReceived INT DEFAULT 0
);''')
conn.execute('''CREATE UNIQUE INDEX IF NOT EXISTS AddressLong on Nodes (AddressLong);''')
print("Nodes table successfully created")

# Create Attributes table: time-stamped attribute readings per node.
conn.execute('''CREATE TABLE IF NOT EXISTS Attributes (
    Id INTEGER PRIMARY KEY AUTOINCREMENT,
    NodeId INTEGER,
    Name CHAR(20) NOT NULL,
    Value CHAR(50) NOT NULL,
    Time DATETIME NOT NULL
);''')
conn.execute('''CREATE UNIQUE INDEX IF NOT EXISTS Attribute on Attributes (NodeId, Name, Value, Time);''')
print("Attributes table successfully created")

# Commit the schema explicitly before closing: DDL auto-commit behavior of
# the sqlite3 module varies across Python versions, so don't rely on it.
conn.commit()
conn.close()
Denisolt/IEEE-NYIT-MA | local/lib/python2.7/site-packages/unidecode/__init__.py | 42 | 3095 | # -*- coding: utf-8 -*-
# vi:tabstop=4:expandtab:sw=4
"""Transliterate Unicode text into plain 7-bit ASCII.
Example usage:
>>> from unidecode import unidecode
>>> unidecode(u"\u5317\u4EB0")
"Bei Jing "
The transliteration uses a straightforward map, and doesn't have alternatives
for the same character based on language, position, or anything else.
In Python 3, a standard string object will be returned. If you need bytes, use:
>>> unidecode("Κνωσός").encode("ascii")
b'Knosos'
"""
import warnings
from sys import version_info
# Lazily populated map of 256-codepoint section number -> transliteration
# table; None marks a section for which no data module exists.
Cache = {}
def _warn_if_not_unicode(string):
if version_info[0] < 3 and not isinstance(string, unicode):
warnings.warn( "Argument %r is not an unicode object. "
"Passing an encoded string will likely have "
"unexpected results." % (type(string),),
RuntimeWarning, 2)
def unidecode_expect_ascii(string):
    """Transliterate an Unicode object into an ASCII string

    >>> unidecode(u"\u5317\u4EB0")
    "Bei Jing "

    Fast path for mostly-ASCII input: try the ASCII codec first and only
    fall back to table-based transliteration when it fails. Roughly five
    times faster for pure-ASCII strings, marginally slower otherwise.
    """
    _warn_if_not_unicode(string)
    try:
        encoded = string.encode('ASCII')
    except UnicodeEncodeError:
        # Non-ASCII characters present: do the full transliteration.
        return _unidecode(string)
    # Pure ASCII: return the str itself on Python 3, the bytes on Python 2.
    return string if version_info[0] >= 3 else encoded
def unidecode_expect_nonascii(string):
    """Transliterate an Unicode object into an ASCII string

    >>> unidecode(u"\u5317\u4EB0")
    "Bei Jing "

    Unlike unidecode_expect_ascii(), this skips the ASCII fast path and
    always runs the table-based transliteration.
    """
    _warn_if_not_unicode(string)
    return _unidecode(string)
# Public entry point: default to the ASCII fast-path variant.
unidecode = unidecode_expect_ascii
def _unidecode(string):
retval = []
for char in string:
codepoint = ord(char)
if codepoint < 0x80: # Basic ASCII
retval.append(str(char))
continue
if codepoint > 0xeffff:
continue # Characters in Private Use Area and above are ignored
if 0xd800 <= codepoint <= 0xdfff:
warnings.warn( "Surrogate character %r will be ignored. "
"You might be using a narrow Python build." % (char,),
RuntimeWarning, 2)
section = codepoint >> 8 # Chop off the last two hex digits
position = codepoint % 256 # Last two hex digits
try:
table = Cache[section]
except KeyError:
try:
mod = __import__('unidecode.x%03x'%(section), globals(), locals(), ['data'])
except ImportError:
Cache[section] = None
continue # No match: ignore this character and carry on.
Cache[section] = table = mod.data
if table and len(table) > position:
retval.append( table[position] )
return ''.join(retval)
| gpl-3.0 |
HuaweiSwitch/ansible | lib/ansible/modules/network/nxos/nxos_udld.py | 57 | 8727 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_udld
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
author:
- Jason Edelman (@jedelman8)
notes:
- When C(state=absent), it unconfigures existing settings C(msg_time) and set it
to its default value of 15. It is cleaner to always use C(state=present).
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
required: false
default: null
choices: ['enabled','disabled']
msg_time:
description:
- Message time in seconds for UDLD packets.
required: false
default: null
reset:
description:
- Ability to reset UDLD down interfaces.
required: false
default: null
choices: ['true','false']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure udld aggressive mode is globally disabled and set the global message interval to 20
- nxos_udld:
aggressive: disabled
msg_time: 20
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Ensure agg mode is globally enabled and msg time is 15
- nxos_udld:
aggressive: enabled
msg_time: 15
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command on the device and return the result list.

    For the 'cli' transport, structured output is requested by appending
    '| json' -- except for 'show run', which only yields plain text.

    Fixes the original, which duplicated the run_commands() call in both
    transport branches and left `body` unbound (UnboundLocalError) for any
    transport other than 'cli'/'nxapi'.
    """
    if module.params['transport'] == 'cli' and 'show run' not in command:
        command += ' | json'
    return run_commands(module, [command])
def flatten_list(command_lists):
    """Flatten one level: list entries are spliced in, scalars appended."""
    flat = []
    for entry in command_lists:
        flat.extend(entry if isinstance(entry, list) else [entry])
    return flat
def apply_key_map(key_map, table):
    """Rename table's keys via key_map, dropping keys without a mapping.

    Truthy values are coerced to str; falsy values pass through unchanged.
    """
    renamed = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        renamed[new_key] = str(value) if value else value
    return renamed
def get_commands_config_udld_global(delta, reset):
    """Build the config commands for the proposed-vs-existing delta.

    delta -- dict of settings to change ('aggressive' and/or 'msg_time';
             other keys, e.g. 'reset', are ignored here).
    reset -- when truthy, append 'udld reset' to bounce errdisabled ports.

    Fixes the original, whose config_args mapping carried dead
    'enabled'/'disabled' entries shadowed by the if/elif, and which could
    raise NameError if the first delta item had an unrecognized
    'aggressive' value (command referenced before assignment).
    """
    commands = []
    for param, value in delta.items():
        if param == 'aggressive':
            if value == 'enabled':
                commands.append('udld aggressive')
            elif value == 'disabled':
                commands.append('no udld aggressive')
        elif param == 'msg_time':
            commands.append('udld message-time {msg_time}'.format(**delta))
    if reset:
        commands.append('udld reset')
    return commands
def get_commands_remove_udld_global(delta):
    """Build commands that revert the settings in delta to their defaults."""
    templates = {
        'aggressive': 'no udld aggressive',
        'msg_time': 'no udld message-time {msg_time}',
    }
    commands = []
    for param in delta:
        template = templates.get(param)
        if template is not None:
            commands.append(template.format(**delta))
    return commands
def get_udld_global(module):
    """Read the device's global UDLD state.

    Returns a dict with 'msg_time' (message interval as a string) and
    'aggressive' ('enabled' or 'disabled').
    """
    udld_table = execute_show_command('show udld global', module)[0]
    mode = str(udld_table.get('udld-global-mode', None))
    return dict(
        msg_time=str(udld_table.get('message-interval', None)),
        aggressive='enabled' if mode == 'enabled-aggressive' else 'disabled',
    )
def main():
    """Module entry point: diff proposed vs. existing UDLD config and apply it."""
    argument_spec = dict(
        aggressive=dict(required=False, choices=['enabled', 'disabled']),
        msg_time=dict(required=False, type='str'),
        reset=dict(required=False, type='bool'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['aggressive', 'msg_time', 'reset']],
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    aggressive = module.params['aggressive']
    msg_time = module.params['msg_time']
    reset = module.params['reset']
    state = module.params['state']

    # state=absent is only meaningful for msg_time; aggressive/reset must be
    # managed via state=present.
    if (aggressive or reset) and state == 'absent':
        module.fail_json(msg="It's better to use state=present when "
                             "configuring or unconfiguring aggressive mode "
                             "or using reset flag. state=absent is just for "
                             "when using msg_time param.")

    if msg_time:
        try:
            msg_time_int = int(msg_time)
            if msg_time_int < 7 or msg_time_int > 90:
                raise ValueError
        except ValueError:
            # Fixed: the original concatenation produced "integerbetween"
            # (missing space between the two string fragments).
            module.fail_json(msg='msg_time must be an integer '
                                 'between 7 and 90')

    args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
    proposed = dict((k, v) for k, v in args.items() if v is not None)

    existing = get_udld_global(module)
    end_state = existing

    # Settings in proposed that differ from the device's current state.
    delta = set(proposed.items()).difference(existing.items())

    changed = False
    commands = []
    if state == 'present':
        if delta:
            command = get_commands_config_udld_global(dict(delta), reset)
            commands.append(command)
    elif state == 'absent':
        # Only unconfigure settings actually present on the device.
        common = set(proposed.items()).intersection(existing.items())
        if common:
            command = get_commands_remove_udld_global(dict(common))
            commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_udld_global(module)
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.