repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
gkarlin/django-jenkins | build/pylint/test/input/func_noerror_new_style_class_py_30.py | 4 | 1118 | """check builtin data descriptors such as mode and name attributes
on a file are correctly handled
bug notified by Pierre Rouleau on 2005-04-24
"""
__revision__ = None
class File(file):
    """ Testing new-style class inheritance from file"""
    # NOTE: Python 2 only (print statement, `file` builtin). This is a pylint
    # test input exercising builtin data descriptors (`name`, `mode`) on a
    # file subclass; do not "modernize" it.
    #
    def __init__(self, name, mode="r", buffering=-1, verbose=False):
        """Constructor"""
        # Tracks whether any write()/writelines() happened since open/close.
        self.was_modified = False
        self.verbose = verbose
        super(File, self).__init__(name, mode, buffering)
        if self.verbose:
            # `self.name` / `self.mode` are the builtin file data descriptors
            # under test here.
            print "File %s is opened. The mode is: %s" % (self.name,
                                                          self.mode)
    #
    def write(self, a_string):
        """ Write a string to the file."""
        super(File, self).write(a_string)
        self.was_modified = True
    #
    def writelines(self, sequence):
        """ Write a sequence of strings to the file. """
        super(File, self).writelines(sequence)
        self.was_modified = True
    #
    def close(self) :
        """Close the file."""
        if self.verbose:
            print "Closing file %s" % self.name
        super(File, self).close()
        # Reset the modified flag once the contents are flushed to disk.
        self.was_modified = False
| lgpl-3.0 |
40223119/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/unittest/__init__.py | 900 | 2718 | """
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', 'installHandler',
'registerResult', 'removeResult', 'removeHandler']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
__unittest = True
from .result import TestResult
from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
| agpl-3.0 |
proxysh/Safejumper-for-Desktop | buildmac/Resources/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 713 | 9596 | from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
# Compared by identity (`is`) below; distinct from None, which explicitly
# disables a timeout.
_Default = object()
def current_time():
    """Return the current wall-clock time in seconds since the epoch.

    Kept as a module-level indirection (rather than calling ``time.time``
    directly at each use site) so unit tests can mock it out.
    """
    now = time.time()
    return now
class Timeout(object):
    """ Timeout configuration.
    Timeouts can be defined as a default for a pool::
    timeout = Timeout(connect=2.0, read=7.0)
    http = PoolManager(timeout=timeout)
    response = http.request('GET', 'http://example.com/')
    Or per-request (which overrides the default for the pool)::
    response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
    Timeouts can be disabled by setting all the parameters to ``None``::
    no_timeout = Timeout(connect=None, read=None)
    response = http.request('GET', 'http://example.com/', timeout=no_timeout)
    :param total:
    This combines the connect and read timeouts into one; the read timeout
    will be set to the time leftover from the connect attempt. In the
    event that both a connect timeout and a total are specified, or a read
    timeout and a total are specified, the shorter timeout will be applied.
    Defaults to None.
    :type total: integer, float, or None
    :param connect:
    The maximum amount of time to wait for a connection attempt to a server
    to succeed. Omitting the parameter will default the connect timeout to
    the system default, probably `the global default timeout in socket.py
    <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
    None will set an infinite timeout for connection attempts.
    :type connect: integer, float, or None
    :param read:
    The maximum amount of time to wait between consecutive
    read operations for a response from the server. Omitting
    the parameter will default the read timeout to the system
    default, probably `the global default timeout in socket.py
    <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
    None will set an infinite timeout.
    :type read: integer, float, or None
    .. note::
    Many factors can affect the total amount of time for urllib3 to return
    an HTTP response.
    For example, Python's DNS resolver does not obey the timeout specified
    on the socket. Other factors that can affect total request time include
    high CPU load, high swap, the program running at a low priority level,
    or other behaviors.
    In addition, the read and total timeouts only measure the time between
    read operations on the socket connecting the client and the server,
    not the total amount of time for the request to return a complete
    response. For most requests, the timeout is raised because the server
    has not sent the first byte in the specified time. This is not always
    the case; if a server streams one byte every fifteen seconds, a timeout
    of 20 seconds will not trigger, even though the request will take
    several minutes to complete.
    If your goal is to cut off any request after a set amount of wall clock
    time, consider having a second "watcher" thread to cut off a slow
    request.
    """
    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        # Set by start_connect(); None means the connect clock has not started.
        self._start_connect = None
    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)
    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.
        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
        used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If the type is not an integer or a float, or if it
        is a numeric value less than zero.
        """
        # _Default and the DEFAULT_TIMEOUT sentinel are compared by identity
        # and passed through unchanged.
        if value is _Default:
            return cls.DEFAULT_TIMEOUT
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                            "int or float." % (name, value))
        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError:  # Python 3
            # On Python 3 comparing e.g. a str to 0 raises TypeError instead
            # of silently succeeding, so normalize that to the same ValueError.
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        return value
    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.
        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.
        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)
    def clone(self):
        """ Create a copy of the timeout object
        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.
        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)
    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
        to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect
    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.
        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
        to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect
    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.
        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.
        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total
        # Both connect and total are set: the shorter one wins.
        return min(self._connect, self.total)
    @property
    def read_timeout(self):
        """ Get the value for the read timeout.
        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.
        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.
        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
        has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Read gets whatever budget remains of `total`, capped by the
            # explicit read timeout and clamped at zero.
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
| gpl-2.0 |
google/tmppy | _py2tmp/ir0_optimization/_expression_simplification.py | 1 | 17752 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from _py2tmp.ir0 import ir, Transformation, is_expr_variadic, GlobalLiterals, select1st_literal
from _py2tmp.ir0_optimization._compute_non_expanded_variadic_vars import compute_non_expanded_variadic_vars
from _py2tmp.ir0_optimization._recalculate_template_instantiation_can_trigger_static_asserts_info import expr_can_trigger_static_asserts
class ExpressionSimplificationTransformation(Transformation):
    # Constant-folds and canonicalizes IR expressions (e.g. !!x => x,
    # 0 + x => x, std::is_same<List<...>, List<...>> => element-wise checks),
    # while being careful never to drop sub-expressions that contain
    # non-expanded variadic variables (see _can_remove_subexpression).
    def __init__(self) -> None:
        super().__init__()
        # True while visiting the inner expression of a variadic type
        # expansion; restricts which sub-expressions may be removed.
        self.in_variadic_type_expansion = False
    def transform_not_expr(self, not_expr: ir.NotExpr) -> ir.Expr:
        expr = self.transform_expr(not_expr.inner_expr)
        # !true => false
        # !false => true
        if isinstance(expr, ir.Literal):
            assert isinstance(expr.value, bool)
            return ir.Literal(not expr.value)
        # !!x => x
        if isinstance(expr, ir.NotExpr):
            return expr.inner_expr
        # !(x && y) => (!x || !y)
        # !(x || y) => (!x && !y)
        if isinstance(expr, ir.BoolBinaryOpExpr):
            op = {
                '&&': '||',
                '||': '&&',
            }[expr.op]
            return self.transform_expr(
                ir.BoolBinaryOpExpr(lhs=ir.NotExpr(expr.lhs), rhs=ir.NotExpr(expr.rhs), op=op))
        # !(x == y) => x != y
        # !(x != y) => x == y
        # !(x < y) => x >= y
        # !(x <= y) => x > y
        # !(x > y) => x <= y
        # !(x >= y) => x < y
        # NOTE(review): the guard below only matches '==' and '!=', so the
        # '<'/'<='/'>'/'>=' entries in the map are unreachable from here —
        # confirm whether negating inequalities was intentionally disabled.
        if isinstance(expr, ir.ComparisonExpr) and expr.op in ('==', '!='):
            op = {
                '==': '!=',
                '!=': '==',
                '<': '>=',
                '<=': '>',
                '>': '<=',
                '>=': '<',
            }[expr.op]
            return ir.ComparisonExpr(expr.lhs, expr.rhs, op)
        return ir.NotExpr(expr)
    def transform_unary_minus_expr(self, unary_minus: ir.UnaryMinusExpr) -> ir.Expr:
        expr = self.transform_expr(unary_minus.inner_expr)
        # -(3) => -3
        if isinstance(expr, ir.Literal):
            assert isinstance(expr.value, int)
            return ir.Literal(-expr.value)
        # -(x - y) => y - x
        if isinstance(expr, ir.Int64BinaryOpExpr) and expr.op == '-':
            return ir.Int64BinaryOpExpr(lhs=expr.rhs, rhs=expr.lhs, op='-')
        return ir.UnaryMinusExpr(expr)
    def transform_int64_binary_op_expr(self, binary_op: ir.Int64BinaryOpExpr) -> ir.Expr:
        lhs = binary_op.lhs
        rhs = binary_op.rhs
        op = binary_op.op
        # (x - y) => (x + -y)
        # This pushes down the minus, so that e.g. (x - (-y)) => (x + y).
        if op == '-':
            rhs = ir.UnaryMinusExpr(rhs)
            op = '+'
        lhs = self.transform_expr(lhs)
        rhs = self.transform_expr(rhs)
        if op == '+' and isinstance(rhs, ir.UnaryMinusExpr):
            # We could not push down the minus, so switch back to a subtraction.
            op = '-'
            rhs = rhs.inner_expr
        if op == '+':
            # 3 + 5 => 8
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value + rhs.value)
            # 0 + x => x
            if isinstance(lhs, ir.Literal) and lhs.value == 0:
                return rhs
            # x + 0 => x
            if isinstance(rhs, ir.Literal) and rhs.value == 0:
                return lhs
        if op == '-':
            # 8 - 5 => 3
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value - rhs.value)
            # 0 - x => -x
            if isinstance(lhs, ir.Literal) and lhs.value == 0:
                return ir.UnaryMinusExpr(rhs)
            # x - 0 => x
            if isinstance(rhs, ir.Literal) and rhs.value == 0:
                return lhs
        if op == '*':
            # 3 * 5 => 15
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value * rhs.value)
            # 0 * x => 0
            if isinstance(lhs, ir.Literal) and lhs.value == 0:
                if self._can_remove_subexpression(rhs):
                    return ir.Literal(0)
            # x * 0 => 0
            if isinstance(rhs, ir.Literal) and rhs.value == 0:
                if self._can_remove_subexpression(lhs):
                    return ir.Literal(0)
            # 1 * x => x
            if isinstance(lhs, ir.Literal) and lhs.value == 1:
                return rhs
            # x * 1 => x
            if isinstance(rhs, ir.Literal) and rhs.value == 1:
                return lhs
        if op == '/':
            # 16 / 3 => 5
            # NOTE(review): Python's // and % floor toward negative infinity,
            # while C++ integer division truncates toward zero; these fold
            # rules differ from C++ for negative operands — confirm which
            # semantics the generated template code is meant to have.
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value // rhs.value)
            # x / 1 => x
            if isinstance(rhs, ir.Literal) and rhs.value == 1:
                return lhs
        if op == '%':
            # 16 % 3 => 1
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value % rhs.value)
            # x % 1 => 0
            if isinstance(rhs, ir.Literal) and rhs.value == 1:
                return ir.Literal(0)
        return ir.Int64BinaryOpExpr(lhs, rhs, op)
    def transform_bool_binary_op_expr(self, binary_op: ir.BoolBinaryOpExpr) -> ir.Expr:
        lhs = binary_op.lhs
        rhs = binary_op.rhs
        op = binary_op.op
        lhs = self.transform_expr(lhs)
        rhs = self.transform_expr(rhs)
        if op == '&&':
            # true && false => false
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value and rhs.value)
            # true && x => x
            if isinstance(lhs, ir.Literal) and lhs.value is True:
                return rhs
            # x && true => x
            if isinstance(rhs, ir.Literal) and rhs.value is True:
                return lhs
            # false && x => false
            if isinstance(lhs, ir.Literal) and lhs.value is False:
                if self._can_remove_subexpression(rhs):
                    return ir.Literal(False)
            # x && false => false
            if isinstance(rhs, ir.Literal) and rhs.value is False:
                if self._can_remove_subexpression(lhs):
                    return ir.Literal(False)
        if op == '||':
            # true || false => true
            if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
                return ir.Literal(lhs.value or rhs.value)
            # false || x => x
            if isinstance(lhs, ir.Literal) and lhs.value is False:
                return rhs
            # x || false => x
            if isinstance(rhs, ir.Literal) and rhs.value is False:
                return lhs
            # true || x => true
            if isinstance(lhs, ir.Literal) and lhs.value is True:
                if self._can_remove_subexpression(rhs):
                    return ir.Literal(True)
            # x || true => true
            if isinstance(rhs, ir.Literal) and rhs.value is True:
                if self._can_remove_subexpression(lhs):
                    return ir.Literal(True)
        return ir.BoolBinaryOpExpr(lhs, rhs, op)
    def transform_comparison_expr(self, comparison: ir.ComparisonExpr) -> ir.Expr:
        lhs = comparison.lhs
        rhs = comparison.rhs
        op = comparison.op
        lhs = self.transform_expr(lhs)
        rhs = self.transform_expr(rhs)
        # Fold comparisons between two literals.
        if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
            if op == '==':
                return ir.Literal(lhs.value == rhs.value)
            if op == '!=':
                return ir.Literal(lhs.value != rhs.value)
            if op == '<':
                return ir.Literal(lhs.value < rhs.value)
            if op == '<=':
                return ir.Literal(lhs.value <= rhs.value)
            if op == '>':
                return ir.Literal(lhs.value > rhs.value)
            if op == '>=':
                return ir.Literal(lhs.value >= rhs.value)
        # x == x => true, x != x => false — only when evaluating x cannot
        # trigger a static_assert and the sub-expressions are removable.
        if op in ('==', '!=') and self._is_syntactically_equal(lhs, rhs) and not expr_can_trigger_static_asserts(lhs):
            if self._can_remove_subexpression(lhs) and self._can_remove_subexpression(rhs):
                return {
                    '==': ir.Literal(True),
                    '!=': ir.Literal(False),
                }[op]
        # Normalize so a bool literal (if any) is on the left, then simplify
        # comparisons against true/false into the operand or its negation.
        if op in ('==', '!=') and isinstance(rhs, ir.Literal) and rhs.expr_type == ir.BoolType():
            rhs, lhs = lhs, rhs
        if op in ('==', '!=') and isinstance(lhs, ir.Literal) and lhs.expr_type == ir.BoolType():
            return {
                ('==', True): lambda: rhs,
                ('==', False): lambda: self.transform_expr(ir.NotExpr(rhs)),
                ('!=', True): lambda: self.transform_expr(ir.NotExpr(rhs)),
                ('!=', False): lambda: rhs,
            }[(op, lhs.value)]()
        return ir.ComparisonExpr(lhs, rhs, op)
    def transform_static_assert(self, static_assert: ir.StaticAssert):
        # Drop static_asserts whose condition folds to the literal `true`.
        expr = self.transform_expr(static_assert.expr)
        if isinstance(expr, ir.Literal) and expr.value is True:
            return
        self.writer.write(ir.StaticAssert(expr=expr,
                                          message=static_assert.message))
    def _is_syntactically_equal(self, lhs: ir.Expr, rhs: ir.Expr):
        # Structural equality: same node kind (excluding children) and
        # recursively equal direct sub-expressions.
        if not lhs.is_same_expr_excluding_subexpressions(rhs):
            return False
        lhs_exprs = list(lhs.direct_subexpressions)
        rhs_exprs = list(rhs.direct_subexpressions)
        if len(lhs_exprs) != len(rhs_exprs):
            return False
        return all(self._is_syntactically_equal(lhs_expr, rhs_expr)
                   for lhs_expr, rhs_expr in zip(lhs_exprs, rhs_exprs))
    def transform_variadic_type_expansion(self, expr: ir.VariadicTypeExpansion):
        # Flag that we're inside a `...` expansion for the duration of the
        # recursive visit, then restore the previous state.
        old_in_variadic_type_expansion = self.in_variadic_type_expansion
        self.in_variadic_type_expansion = True
        result = super().transform_variadic_type_expansion(expr)
        self.in_variadic_type_expansion = old_in_variadic_type_expansion
        return result
    def transform_class_member_access(self, class_member_access: ir.ClassMemberAccess):
        # Intercept member accesses on instantiations of known helper
        # templates (GetFirstError, std::is_same, Select1st*) and apply
        # template-specific simplifications.
        if (isinstance(class_member_access.inner_expr, ir.TemplateInstantiation)
                and isinstance(class_member_access.inner_expr.template_expr, ir.AtomicTypeLiteral)):
            if class_member_access.inner_expr.template_expr.cpp_type == 'GetFirstError':
                args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
                return self.transform_get_first_error(args)
            if class_member_access.inner_expr.template_expr.cpp_type == 'std::is_same':
                args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
                return self.transform_is_same(args)
            if class_member_access.inner_expr.template_expr.cpp_type.startswith('Select1st'):
                args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
                return self.transform_select1st(args)
        return super().transform_class_member_access(class_member_access)
    def _can_remove_subexpression(self, expr: ir.Expr):
        # If we're in a variadic type expr, we can't remove variadic sub-exprs (not in general at least).
        # E.g. BoolList<(F<Ts>::value || true)...> can't be optimized to BoolList<true>
        if self.in_variadic_type_expansion and is_expr_variadic(expr):
            return False
        return True
    def transform_get_first_error(self, args: Tuple[ir.Expr, ...]):
        # Drop args that are statically known not to be errors: plain `void`
        # and `Select1stType*<void, expr>...` expansions.
        new_args = []
        for arg in args:
            if isinstance(arg, ir.AtomicTypeLiteral) and arg.cpp_type == 'void':
                pass
            elif (isinstance(arg, ir.VariadicTypeExpansion)
                  and isinstance(arg.inner_expr, ir.ClassMemberAccess)
                  and isinstance(arg.inner_expr.inner_expr, ir.TemplateInstantiation)
                  and isinstance(arg.inner_expr.inner_expr.template_expr, ir.AtomicTypeLiteral)
                  and arg.inner_expr.inner_expr.template_expr.cpp_type.startswith('Select1stType')
                  and len(arg.inner_expr.inner_expr.args) == 2
                  and isinstance(arg.inner_expr.inner_expr.args[0], ir.AtomicTypeLiteral)
                  and arg.inner_expr.inner_expr.args[0].cpp_type == 'void'):
                # Select1stType*<void, expr>...
                pass
            else:
                new_args.append(arg)
        return ir.ClassMemberAccess(inner_expr=ir.TemplateInstantiation(template_expr=GlobalLiterals.GET_FIRST_ERROR,
                                                                       args=tuple(new_args),
                                                                       instantiation_might_trigger_static_asserts=False),
                                    expr_type=ir.TypeType(),
                                    member_name='type')
    def transform_is_same(self, args: Tuple[ir.Expr, ...]):
        assert len(args) == 2
        lhs, rhs = args
        list_template_names = {'List', 'BoolList', 'Int64List'}
        # Only decompose when both sides are the *same* list template, with a
        # statically known, equal, non-empty number of non-variadic args.
        if (isinstance(lhs, ir.TemplateInstantiation) and isinstance(lhs.template_expr, ir.AtomicTypeLiteral) and lhs.template_expr.cpp_type in list_template_names
                and isinstance(rhs, ir.TemplateInstantiation) and isinstance(rhs.template_expr, ir.AtomicTypeLiteral) and rhs.template_expr.cpp_type in list_template_names
                and lhs.template_expr.cpp_type == rhs.template_expr.cpp_type
                and not any(isinstance(arg, ir.VariadicTypeExpansion) for arg in lhs.args)
                and not any(isinstance(arg, ir.VariadicTypeExpansion) for arg in rhs.args)
                and len(lhs.args) == len(rhs.args)
                and lhs.args):
            # std::is_same<List<X1, X2, X3>, List<Y1, Y2, Y3>>::value
            # -> std::is_same<X1, Y1>::value && std::is_same<X2, Y2>::value && std::is_same<X3, Y3>::value
            if lhs.template_expr.cpp_type == 'List':
                result = None
                for lhs_arg, rhs_arg in zip(lhs.args, rhs.args):
                    if result:
                        result = ir.BoolBinaryOpExpr(lhs=result,
                                                     rhs=self._create_is_same_expr(lhs_arg, rhs_arg),
                                                     op='&&')
                    else:
                        result = self._create_is_same_expr(lhs_arg, rhs_arg)
                return self.transform_expr(result)
            # std::is_same<IntList<n1, n2, n3>, IntList<m1, m2, m3>>::value
            # -> (n1 == m1) && (n2 == m2) && (n3 == m3)
            # (and same for BoolList)
            result = None
            for lhs_arg, rhs_arg in zip(lhs.args, rhs.args):
                if result:
                    result = ir.BoolBinaryOpExpr(lhs=result,
                                                 rhs=ir.ComparisonExpr(lhs_arg, rhs_arg, op='=='),
                                                 op='&&')
                else:
                    result = ir.ComparisonExpr(lhs_arg, rhs_arg, op='==')
            return self.transform_expr(result)
        return self._create_is_same_expr(lhs, rhs)
    def _create_is_same_expr(self, lhs: ir.Expr, rhs: ir.Expr):
        # Build std::is_same<lhs, rhs>::value.
        return ir.ClassMemberAccess(
            inner_expr=ir.TemplateInstantiation(template_expr=GlobalLiterals.STD_IS_SAME,
                                                args=(lhs, rhs),
                                                instantiation_might_trigger_static_asserts=False),
            expr_type=ir.BoolType(),
            member_name='value')
    def transform_select1st(self, args: Tuple[ir.Expr, ...]):
        # Select1st*<lhs, rhs>::value evaluates to lhs; shrink rhs to a single
        # free variable while keeping the expression's variadic-ness intact.
        lhs, rhs = args
        best_var = None
        # First preference to non-expanded variadic vars, to keep the Select1st* expression variadic if it is now.
        for var_name in compute_non_expanded_variadic_vars(rhs):
            [best_var] = (var
                          for var in rhs.free_vars
                          if var.cpp_type == var_name)
            break
        # If there are none, then any non-variadic var is also ok.
        if not best_var:
            for var in rhs.free_vars:
                if not var.is_variadic and isinstance(var.expr_type, (ir.BoolType, ir.Int64Type, ir.TypeType)):
                    best_var = var
                    break
        if best_var:
            rhs = best_var
        return ir.ClassMemberAccess(inner_expr=ir.TemplateInstantiation(template_expr=select1st_literal(lhs.expr_type, rhs.expr_type),
                                                                       args=(lhs, rhs),
                                                                       instantiation_might_trigger_static_asserts=False),
                                    expr_type=lhs.expr_type,
                                    member_name='value')
| apache-2.0 |
JioEducation/edx-platform | cms/lib/xblock/tagging/migrations/0001_initial.py | 39 | 1187 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the xblock "tagging" app: creates the TagCategories
    # and TagAvailableValues tables, then links each available value to its
    # category with a ForeignKey. Operation order is significant (the FK is
    # added only after both models exist); do not edit applied migrations.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='TagAvailableValues',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('id',),
            },
        ),
        migrations.CreateModel(
            name='TagCategories',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, unique=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('title',),
            },
        ),
        migrations.AddField(
            model_name='tagavailablevalues',
            name='category',
            field=models.ForeignKey(to='tagging.TagCategories'),
        ),
    ]
| agpl-3.0 |
cppformat/cppformat | doc/conf.py | 16 | 8122 | # -*- coding: utf-8 -*-
#
# format documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 18 06:46:16 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re, subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
if os.environ.get('READTHEDOCS', None) == 'True':
    # Regenerate the Doxygen XML consumed by the 'breathe' extension when
    # building on Read the Docs (the Doxyfile lives next to this conf.py).
    subprocess.call('doxygen')
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.ifconfig', 'breathe']
# 'breathe' bridges Doxygen XML into Sphinx; API pages use this project name.
breathe_default_project = "format"
breathe_domain_by_extension = {"h" : "cpp"}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'contents'
# General information about the project.
project = u'fmt'
copyright = u'2012-present, Victor Zverovich'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Version and release are passed from CMake.
#version = None
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['virtualenv']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'cpp:any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Code samples default to C++ highlighting and the C++ Sphinx domain.
highlight_language = 'c++'
primary_domain = 'cpp'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic-bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# The 'basic-bootstrap' theme is shipped alongside this conf.py.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'formatdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'format.tex', u'fmt documentation',
     u'Victor Zverovich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'fmt', u'fmt documentation', [u'Victor Zverovich'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'fmt', u'fmt documentation',
u'Victor Zverovich', 'fmt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-2-clause |
40023247/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/collections/__init__.py | 625 | 25849 | #__all__ = ['deque', 'defaultdict', 'Counter']
from _collections import deque, defaultdict
#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# fixme brython.. there is an issue with _abcoll
#from _abcoll import *
#from _abcoll import Set
from _abcoll import MutableMapping
#import _abcoll
#__all__ += _abcoll.__all__
from collections.abc import *
import collections.abc
__all__ += collections.abc.__all__
from _collections import deque, defaultdict, namedtuple
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
#fixme brython
#from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
class Set(set):
    # Placeholder for the Set ABC normally imported from _abcoll; that import
    # is disabled above ("fixme brython"), so this stands in and behaves
    # exactly like the builtin set.
    pass
class Sequence(list):
    # Placeholder for the Sequence ABC normally imported from _abcoll (see
    # the "fixme brython" note above); behaves exactly like the builtin list.
    pass
def _proxy(obj):
    """Identity stand-in for ``weakref.proxy``.

    Brython has no weakref support (see the "fixme brython" note above), so
    the "proxy" is simply the object itself; OrderedDict's links therefore
    hold strong references in this implementation.
    """
    return obj
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Node of OrderedDict's circular doubly linked list: 'prev'/'next' point
    # to the neighbouring links and 'key' is the dictionary key stored at this
    # position.  __slots__ avoids a per-instance __dict__; '__weakref__' keeps
    # the node weak-referenceable so the root sentinel can be proxied.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    # NOTE(review): under Brython, _proxy is the identity function, so the
    # "weakref proxies" above are in fact strong references here.

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the sentinel link and the
            # key -> link map; the sentinel points to itself (empty list).
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        # Reset the sentinel to point at itself, then drop map and dict data.
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the tail element (most recently inserted).
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            # Unlink the head element (least recently inserted).
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).

        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).

        '''
        # Unlink the node from its current position...
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        # ...then re-insert it just before (last) or just after (first) root.
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link

    def __sizeof__(self):
        # Estimate total memory: instance dict, key->link map, link objects
        # and their proxies (n = item count + the root sentinel).
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size

    #fixme brython..  Issue with _abcoll, which contains MutableMapping
    # Mixin methods borrowed from the MutableMapping ABC; they are expressed
    # in terms of the order-aware primitives defined above.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__

    # Sentinel used by pop() to distinguish "no default given" from None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    #fixme, brython issue
    #@_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the implementation attributes (__root, __map, ...) so they
        # are rebuilt on unpickling instead of being pickled.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.

        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
    """Add one to ``mapping[elem]`` for every *elem* yielded by *iterable*.

    Missing keys start at zero; the mapping is updated in place.
    """
    get_count = mapping.get          # bind once: avoids per-item attribute lookup
    for element in iterable:
        mapping[element] = get_count(element, 0) + 1
#try: # Load C helper function if available
# from _collections import _count_elements
#except ImportError:
# pass
class Counter(dict):
    '''Dict subclass for counting hashable items.  Sometimes called a bag
    or multiset.  Elements are stored as dictionary keys and their counts
    are stored as dictionary values.

    >>> c = Counter('abcdeabcdabcaba')  # count elements from a string

    >>> c.most_common(3)                # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c)                       # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements()))   # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values())                 # total of all counts
    15

    >>> c['a']                          # count of letter 'a'
    5
    >>> for elem in 'shazam':           # update counts from an iterable
    ...     c[elem] += 1                # by adding 1 to each element's count
    >>> c['a']                          # now there are seven 'a'
    7
    >>> del c['b']                      # remove all 'b'
    >>> c['b']                          # now there are zero 'b'
    0

    >>> d = Counter('simsalabim')       # make another counter
    >>> c.update(d)                     # add in the second counter
    >>> c['a']                          # now there are nine 'a'
    9

    >>> c.clear()                       # empty the counter
    >>> c
    Counter()

    Note:  If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:

    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2                     # reduce the count of 'b' by two
    >>> c.most_common()                 # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]

    '''
    # References:
    #   http://en.wikipedia.org/wiki/Multiset
    #   http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    #   http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    #   http://code.activestate.com/recipes/259174/
    #   Knuth, TAOCP Vol. II section 4.6.3

    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object.  And if given, count elements
        from an input iterable.  Or, initialize the count from another mapping
        of elements to their counts.

        >>> c = Counter()                           # a new, empty counter
        >>> c = Counter('gallahad')                 # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2})           # a new counter from a mapping
        >>> c = Counter(a=4, b=2)                   # a new counter from keyword args

        '''
        #super().__init__() #BE modified since super not supported
        dict.__init__(self)
        self.update(iterable, **kwds)

    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0

    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.

        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]

        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        # heapq.nlargest is O(len(self) * log n) -- cheaper than a full sort
        # when only the top n entries are requested.
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))

    def elements(self):
        '''Iterator over elements repeating each as many times as its count.

        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']

        # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements():     # loop over factors
        ...     product *= factor                       # and multiply them
        >>> product
        1836

        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.

        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.items()))

    # Override dict methods where necessary

    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined.  Use Counter(iterable) instead.')

    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.update('witch')           # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d)                 # add elements from another counter
        >>> c['h']                      # four 'h' in which, witch, and watch
        4

        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts.  Instead, we implement straight-addition.  Both the inputs
        # and outputs are allowed to contain zero and negative counts.

        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.items():
                        self[elem] = count + self_get(elem, 0)
                else:
                    super().update(iterable) # fast path when counter is empty
            else:
                _count_elements(self, iterable)
        if kwds:
            self.update(kwds)

    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero.  Both the inputs and outputs are
        allowed to contain zero and negative counts.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.subtract('witch')             # subtract elements from another iterable
        >>> c.subtract(Counter('watch'))    # subtract elements from another counter
        >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
        -1

        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)

    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)

    def __reduce__(self):
        # Pickle as (class, (plain-dict,)) so counts round-trip through
        # the Counter(mapping) constructor.
        return self.__class__, (dict(self),)

    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super().__delitem__(elem)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))

    # Multiset-style mathematical operations discussed in:
    #       Knuth TAOCP Volume II section 4.6.3 exercise 19
    #       and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    #       c += Counter()

    def __add__(self, other):
        '''Add counts from two counters.

        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Include elements present only in 'other'.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.

        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Elements only in 'other' with negative counts become positive here.
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result

    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.

        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.

        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # Only elements common to both can have a positive minimum, so
        # iterating self alone is sufficient.
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list.  That list is public and can
    be accessed or updated using the *maps* attribute.  There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.

    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.

        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]             # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        return len(set().union(*self.maps))     # reuses stored hash values if possible

    def __iter__(self):
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    #fixme, brython
    #@_recursive_repr()
    def __repr__(self):
        # BUG FIX: a second, duplicate __repr__ definition (a debugging
        # leftover returning ','.join(str(m) for m in maps)) used to shadow
        # this canonical representation; only this one is kept.
        return '{0.__class__.__name__}({1})'.format(
            self, ', '.join(map(repr, self.maps)))

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self):                        # like Django's Context.push()
        'New ChainMap with a new dict followed by all previous maps.'
        return self.__class__({}, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            #raise KeyError('Key not found in the first mapping: {!r}'.format(key))
            raise KeyError('Key not found in the first mapping: %s' % key)

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """Wrapper around a real dict (kept in ``self.data``) designed to make
    dict-like classes easy to subclass by overriding individual methods."""

    # Start by filling-out the abstract methods
    def __init__(self, dict=None, **kwargs):
        # 'dict' intentionally shadows the builtin: the stdlib keeps this
        # parameter name for backward compatibility.
        self.data = {}
        if dict is not None:
            self.update(dict)
        if kwargs:  # idiomatic truth test (was: if len(kwargs))
            self.update(kwargs)

    def __len__(self): return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Mirror dict-subclass behaviour: give subclasses a chance to
        # supply a fallback value via __missing__ before raising.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item): self.data[key] = item

    def __delitem__(self, key): del self.data[key]

    def __iter__(self):
        return iter(self.data)

    # Modify __contains__ to work correctly when __missing__ is present:
    # membership checks only the stored data, never the fallback.
    def __contains__(self, key):
        return key in self.data

    # Now, add the methods in dicts but not in MutableMapping
    def __repr__(self): return repr(self.data)

    def copy(self):
        """Return a shallow copy; subclasses go through copy.copy so any
        extra instance state is preserved."""
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach the data dict so copy.copy does not
            # duplicate it; the items are re-added below via update().
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Return a new instance mapping every key of *iterable* to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d
################################################################################
### UserList
################################################################################
################################################################################
### UserString
################################################################################
| agpl-3.0 |
auduny/home-assistant | tests/components/tplink/test_init.py | 8 | 6404 | """Tests for the TP-Link component."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import tplink
from homeassistant.setup import async_setup_component
from pyHS100 import SmartPlug, SmartBulb
from tests.common import MockDependency, MockConfigEntry, mock_coro
MOCK_PYHS100 = MockDependency("pyHS100")
async def test_creating_entry_tries_discover(hass):
    """Test setting up does discovery."""
    # Mock out the whole pyHS100 dependency and the component setup so the
    # config flow can run without real network devices.
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.async_setup_entry",
        return_value=mock_coro(True),
    ) as mock_setup, patch(
        "pyHS100.Discover.discover", return_value={"host": 1234}
    ):
        result = await hass.config_entries.flow.async_init(
            tplink.DOMAIN, context={"source": config_entries.SOURCE_USER}
        )

        # Confirmation form
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY

        await hass.async_block_till_done()

    # Creating the entry must have set up the component exactly once.
    assert len(mock_setup.mock_calls) == 1
async def test_configuring_tplink_causes_discovery(hass):
    """Test that specifying empty config does discovery."""
    # An empty "tplink:" section in configuration.yaml should trigger one
    # pyHS100 discovery pass during component setup.
    with MOCK_PYHS100, patch("pyHS100.Discover.discover") as discover:
        discover.return_value = {"host": 1234}
        await async_setup_component(hass, tplink.DOMAIN, {"tplink": {}})
        await hass.async_block_till_done()

    assert len(discover.mock_calls) == 1
@pytest.mark.parametrize(
    "name,cls,platform",
    [
        ("pyHS100.SmartPlug", SmartPlug, "switch"),
        ("pyHS100.SmartBulb", SmartBulb, "light"),
    ],
)
@pytest.mark.parametrize("count", [1, 2, 3])
async def test_configuring_device_types(hass, name, cls, platform, count):
    """Test that light or switch platform list is filled correctly."""
    with patch("pyHS100.Discover.discover") as discover, patch(
        "pyHS100.SmartDevice._query_helper"
    ):
        # Fabricate 'count' discovered devices of the parametrized class;
        # each should land in the matching platform bucket.
        discovery_data = {
            "123.123.123.{}".format(c): cls("123.123.123.123")
            for c in range(count)
        }
        discover.return_value = discovery_data
        await async_setup_component(hass, tplink.DOMAIN, {"tplink": {}})
        await hass.async_block_till_done()

    assert len(discover.mock_calls) == 1
    assert len(hass.data[tplink.DOMAIN][platform]) == count
async def test_is_dimmable(hass):
    """Test that is_dimmable switches are correctly added as lights."""
    # Force SmartPlug.is_dimmable to True: a dimmable plug must be exposed
    # through the light platform, not the switch platform.
    with patch("pyHS100.Discover.discover") as discover, patch(
        "homeassistant.components.tplink.light.async_setup_entry",
        return_value=mock_coro(True),
    ) as setup, patch("pyHS100.SmartDevice._query_helper"), patch(
        "pyHS100.SmartPlug.is_dimmable", True
    ):
        dimmable_switch = SmartPlug("123.123.123.123")
        discover.return_value = {"host": dimmable_switch}

        await async_setup_component(hass, tplink.DOMAIN, {"tplink": {}})
        await hass.async_block_till_done()

    assert len(discover.mock_calls) == 1
    assert len(setup.mock_calls) == 1
    assert len(hass.data[tplink.DOMAIN]["light"]) == 1
    assert len(hass.data[tplink.DOMAIN]["switch"]) == 0
async def test_configuring_discovery_disabled(hass):
    """Test that discover does not get called when disabled."""
    # With CONF_DISCOVERY: False the component should still set up,
    # but never invoke pyHS100 discovery.
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.async_setup_entry",
        return_value=mock_coro(True),
    ) as mock_setup, patch(
        "pyHS100.Discover.discover", return_value=[]
    ) as discover:
        await async_setup_component(
            hass,
            tplink.DOMAIN,
            {tplink.DOMAIN: {tplink.CONF_DISCOVERY: False}},
        )
        await hass.async_block_till_done()

    assert len(discover.mock_calls) == 0
    assert len(mock_setup.mock_calls) == 1
async def test_platforms_are_initialized(hass):
    """Test that platforms are initialized per configuration array."""
    # Static host configuration (discovery disabled): one light and one
    # switch should each trigger their platform's setup exactly once.
    config = {
        "tplink": {
            "discovery": False,
            "light": [{"host": "123.123.123.123"}],
            "switch": [{"host": "321.321.321.321"}],
        }
    }

    with patch("pyHS100.Discover.discover") as discover, patch(
        "pyHS100.SmartDevice._query_helper"
    ), patch(
        "homeassistant.components.tplink.light.async_setup_entry",
        return_value=mock_coro(True),
    ) as light_setup, patch(
        "homeassistant.components.tplink.switch.async_setup_entry",
        return_value=mock_coro(True),
    ) as switch_setup, patch(
        "pyHS100.SmartPlug.is_dimmable", False
    ):
        # patching is_dimmable is necessray to avoid misdetection as light.
        await async_setup_component(hass, tplink.DOMAIN, config)
        await hass.async_block_till_done()

    assert len(discover.mock_calls) == 0
    assert len(light_setup.mock_calls) == 1
    assert len(switch_setup.mock_calls) == 1
async def test_no_config_creates_no_entry(hass):
    """Test for when there is no tplink in config."""
    # Without a "tplink:" section, no config entry should be created and
    # async_setup_entry must never run.
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.async_setup_entry",
        return_value=mock_coro(True),
    ) as mock_setup:
        await async_setup_component(hass, tplink.DOMAIN, {})
        await hass.async_block_till_done()

    assert len(mock_setup.mock_calls) == 0
@pytest.mark.parametrize("platform", ["switch", "light"])
async def test_unload(hass, platform):
    """Test that the async_unload_entry works."""
    # As we have currently no configuration, we just to pass the domain here.
    entry = MockConfigEntry(domain=tplink.DOMAIN)
    entry.add_to_hass(hass)

    with patch("pyHS100.SmartDevice._query_helper"), patch(
        "homeassistant.components.tplink.{}"
        ".async_setup_entry".format(platform),
        return_value=mock_coro(True),
    ) as light_setup:
        config = {
            "tplink": {
                platform: [{"host": "123.123.123.123"}],
                "discovery": False,
            }
        }
        assert await async_setup_component(hass, tplink.DOMAIN, config)
        await hass.async_block_till_done()

        assert len(light_setup.mock_calls) == 1
        assert tplink.DOMAIN in hass.data

    # Unloading must succeed and clear all component state.
    assert await tplink.async_unload_entry(hass, entry)
    assert not hass.data[tplink.DOMAIN]
| apache-2.0 |
kikusu/chainer | cupy/creation/matrix.py | 6 | 1675 | import numpy
import cupy
def diag(v, k=0):
    """Returns a diagonal or a diagonal array.

    Args:
        v (array-like): Array or array-like object.
        k (int): Index of diagonals. Zero indicates the main diagonal, a
            positive value an upper diagonal, and a negative value a lower
            diagonal.

    Returns:
        cupy.ndarray: If ``v`` indicates a 1-D array, then it returns a 2-D
        array with the specified diagonal filled by ``v``. If ``v`` indicates a
        2-D array, then it returns the specified diagonal of ``v``. In latter
        case, if ``v`` is a :class:`cupy.ndarray` object, then its view is
        returned.

    .. seealso:: :func:`numpy.diag`

    """
    if isinstance(v, cupy.ndarray):
        if v.ndim == 1:
            # 1-D input: allocate a (n+|k|) x (n+|k|) zero matrix and write
            # v into its k-th diagonal.
            size = v.size + abs(k)
            ret = cupy.zeros((size, size), dtype=v.dtype)
            ret.diagonal(k)[:] = v
            return ret
        else:
            # 2-D (or higher) input: return a view of the k-th diagonal.
            return v.diagonal(k)
    else:
        # Non-cupy input: compute with NumPy on the host, then transfer
        # the result to the device.
        return cupy.array(numpy.diag(v, k))
def diagflat(v, k=0):
    """Creates a diagonal array from the flattened input.

    Args:
        v (array-like): Array or array-like object.
        k (int): Index of diagonals. See :func:`cupy.diag` for detail.

    Returns:
        cupy.ndarray: A 2-D diagonal array with the diagonal copied from ``v``.

    .. seealso:: :func:`numpy.diagflat`

    """
    if isinstance(v, cupy.ndarray):
        return cupy.diag(v.ravel(), k)
    else:
        # BUG FIX: the previous ``numpy.ndarray(v)`` call interpreted ``v``
        # as a *shape* argument and produced an uninitialized array;
        # ``numpy.asarray`` correctly converts the array-like input.
        return cupy.diag(numpy.asarray(v).ravel(), k)
# TODO(okuta): Implement tri
# TODO(okuta): Implement tril
# TODO(okuta): Implement triu
# TODO(okuta): Implement vander
# TODO(okuta): Implement mat
# TODO(okuta): Implement bmat
| mit |
markeTIC/OCB | addons/google_drive/google_drive.py | 26 | 15793 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import SUPERUSER_ID
from openerp.addons.google_account import TIMEOUT
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
import werkzeug.urls
import urllib2
import json
import re
import openerp
_logger = logging.getLogger(__name__)
class config(osv.Model):
    """Per-model configuration of Google Drive document templates.

    Each record links an Odoo/OpenERP model (optionally narrowed by an
    ir.filters record) to a Google Drive template document. The template
    is copied on Google's side the first time a user asks for a document
    for a given record, and the resulting URL is stored as an
    ir.attachment so later requests reuse it.
    """
    _name = 'google.drive.config'
    _description = "Google Drive templates config"

    def get_google_drive_url(self, cr, uid, config_id, res_id, template_id, context=None):
        """Return the Drive URL for record ``res_id``.

        Copies the template document on first access (via :meth:`copy_doc`)
        and reuses the stored ir.attachment URL on subsequent calls.
        """
        config = self.browse(cr, SUPERUSER_ID, config_id, context=context)
        model = config.model_id
        filter_name = config.filter_id and config.filter_id.name or False
        record = self.pool.get(model.model).read(cr, uid, [res_id], context=context)[0]
        record.update({'model': model.name, 'filter': filter_name})
        name_gdocs = config.name_template
        try:
            # name_template may interpolate any field read above,
            # e.g. 'Document %(name)s'.
            name_gdocs = name_gdocs % record
        except:
            raise osv.except_osv(_('Key Error!'), _("At least one key cannot be found in your Google Drive name pattern"))

        attach_pool = self.pool.get("ir.attachment")
        attach_ids = attach_pool.search(cr, uid, [('res_model', '=', model.model), ('name', '=', name_gdocs), ('res_id', '=', res_id)])
        url = False
        if attach_ids:
            # A Drive copy already exists for this record: reuse its URL.
            attachment = attach_pool.browse(cr, uid, attach_ids[0], context)
            url = attachment.url
        else:
            url = self.copy_doc(cr, uid, res_id, template_id, name_gdocs, model.model, context).get('url')
        return url

    def get_access_token(self, cr, uid, scope=None, context=None):
        """Exchange the stored refresh token for a fresh OAuth2 access token.

        Raises a RedirectWarning pointing admins at the settings panel when
        Drive is not configured, or a plain error for regular users.
        """
        ir_config = self.pool['ir.config_parameter']
        google_drive_refresh_token = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_refresh_token')
        user_is_admin = self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
        if not google_drive_refresh_token:
            if user_is_admin:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base_setup', 'action_general_configuration')
                msg = _("You haven't configured 'Authorization Code' generated from google, Please generate and configure it .")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            else:
                raise osv.except_osv(_('Error!'), _("Google Drive is not yet configured. Please contact your administrator."))
        google_drive_client_id = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_id')
        google_drive_client_secret = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_secret')
        # Request a new access token using the stored refresh token.
        # NOTE(review): relies on werkzeug's top-level ``url_encode`` alias
        # (available in werkzeug < 1.0) even though only ``werkzeug.urls``
        # is imported above -- confirm against the pinned werkzeug version.
        data = werkzeug.url_encode(dict(client_id=google_drive_client_id,
                                        refresh_token=google_drive_refresh_token,
                                        client_secret=google_drive_client_secret,
                                        grant_type="refresh_token",
                                        scope=scope or 'https://www.googleapis.com/auth/drive'))
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        try:
            req = urllib2.Request('https://accounts.google.com/o/oauth2/token', data, headers)
            content = urllib2.urlopen(req, timeout=TIMEOUT).read()
        except urllib2.HTTPError:
            if user_is_admin:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base_setup', 'action_general_configuration')
                msg = _("Something went wrong during the token generation. Please request again an authorization code .")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            else:
                raise osv.except_osv(_('Error!'), _("Google Drive is not yet configured. Please contact your administrator."))
        content = json.loads(content)
        return content.get('access_token')

    def copy_doc(self, cr, uid, res_id, template_id, name_gdocs, res_model, context=None):
        """Copy the Drive template ``template_id`` for record ``res_id``.

        Creates the copy in the template's own Drive folder, stores the
        resulting link as an ir.attachment, and opens sharing permissions
        (anyone-with-link writer, plus the current user by email).

        Returns a dict with the attachment ``id`` and the Drive ``url``
        (empty dict if Google did not return an alternateLink).
        """
        ir_config = self.pool['ir.config_parameter']
        google_web_base_url = ir_config.get_param(cr, SUPERUSER_ID, 'web.base.url')
        access_token = self.get_access_token(cr, uid, context=context)
        # Fetch the template's parent folder so the copy lands next to it.
        request_url = "https://www.googleapis.com/drive/v2/files/%s?fields=parents/id&access_token=%s" % (template_id, access_token)
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        try:
            req = urllib2.Request(request_url, None, headers)
            parents = urllib2.urlopen(req, timeout=TIMEOUT).read()
        except urllib2.HTTPError:
            raise osv.except_osv(_('Warning!'), _("The Google Template cannot be found. Maybe it has been deleted."))
        parents_dict = json.loads(parents)

        # The copy's description links back to the originating Odoo record.
        record_url = "Click on link to open Record in Odoo\n %s/?db=%s#id=%s&model=%s" % (google_web_base_url, cr.dbname, res_id, res_model)
        data = {"title": name_gdocs, "description": record_url, "parents": parents_dict['parents']}
        request_url = "https://www.googleapis.com/drive/v2/files/%s/copy?access_token=%s" % (template_id, access_token)
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data_json = json.dumps(data)
        # resp, content = Http().request(request_url, "POST", data_json, headers)
        req = urllib2.Request(request_url, data_json, headers)
        content = urllib2.urlopen(req, timeout=TIMEOUT).read()
        content = json.loads(content)
        res = {}
        if content.get('alternateLink'):
            attach_pool = self.pool.get("ir.attachment")
            attach_vals = {'res_model': res_model, 'name': name_gdocs, 'res_id': res_id, 'type': 'url', 'url': content['alternateLink']}
            res['id'] = attach_pool.create(cr, uid, attach_vals)
            # Commit in order to attach the document to the current object instance, even if the permissions has not been written.
            cr.commit()
            res['url'] = content['alternateLink']
            key = self._get_key_from_url(res['url'])
            request_url = "https://www.googleapis.com/drive/v2/files/%s/permissions?emailMessage=This+is+a+drive+file+created+by+Odoo&sendNotificationEmails=false&access_token=%s" % (key, access_token)
            data = {'role': 'writer', 'type': 'anyone', 'value': '', 'withLink': True}
            try:
                req = urllib2.Request(request_url, json.dumps(data), headers)
                urllib2.urlopen(req, timeout=TIMEOUT)
            except urllib2.HTTPError:
                raise self.pool.get('res.config.settings').get_config_warning(cr, _("The permission 'reader' for 'anyone with the link' has not been written on the document"), context=context)
            # Also grant the current user explicit writer access by email;
            # failures here are non-fatal (best effort).
            user = self.pool['res.users'].browse(cr, uid, uid, context=context)
            if user.email:
                data = {'role': 'writer', 'type': 'user', 'value': user.email}
                try:
                    req = urllib2.Request(request_url, json.dumps(data), headers)
                    urllib2.urlopen(req, timeout=TIMEOUT)
                except urllib2.HTTPError:
                    pass
        return res

    def get_google_drive_config(self, cr, uid, res_model, res_id, context=None):
        '''
        Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It
        will first seek for a google.docs.config associated with the model `res_model` to find out what's the template
        of google doc to copy (this is useful if you want to start with a non-empty document, a type or a name
        different than the default values). If no config is associated with the `res_model`, then a blank text document
        with a default name is created.
          :param res_model: the object for which the google doc is created
          :param ids: the list of ids of the objects for which the google doc is created. This list is supposed to have
            a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)
          :return: the config id and config name
        '''
        if not res_id:
            raise osv.except_osv(_('Google Drive Error!'), _("Creating google drive may only be done by one at a time."))
        # check if a model is configured with a template
        config_ids = self.search(cr, uid, [('model_id', '=', res_model)], context=context)
        configs = []
        for config in self.browse(cr, uid, config_ids, context=context):
            if config.filter_id:
                if (config.filter_id.user_id and config.filter_id.user_id.id != uid):
                    # Private filters belonging to other users do not apply.
                    continue
                # Only offer this config if the record matches the filter's
                # domain (evaluated in the filter's own context).
                domain = [('id', 'in', [res_id])] + eval(config.filter_id.domain)
                local_context = context and context.copy() or {}
                local_context.update(eval(config.filter_id.context))
                google_doc_configs = self.pool.get(config.filter_id.model_id).search(cr, uid, domain, context=local_context)
                if google_doc_configs:
                    configs.append({'id': config.id, 'name': config.name})
            else:
                configs.append({'id': config.id, 'name': config.name})
        return configs

    def _get_key_from_url(self, url):
        """Extract the Drive file id from a document URL (key= or /d/ form)."""
        mo = re.search("(key=|/d/)([A-Za-z0-9-_]+)", url)
        if mo:
            return mo.group(2)
        return None

    def _resource_get(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter: Drive resource id parsed from the template URL."""
        result = {}
        for data in self.browse(cr, uid, ids, context):
            mo = self._get_key_from_url(data.google_drive_template_url)
            if mo:
                result[data.id] = mo
            else:
                raise osv.except_osv(_('Incorrect URL!'), _("Please enter a valid Google Document URL."))
        return result

    def _client_id_get(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter: the configured Google OAuth client id."""
        result = {}
        client_id = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'google_drive_client_id')
        for config_id in ids:
            result[config_id] = client_id
        return result

    # Old-API column definitions.
    _columns = {
        'name': fields.char('Template Name', required=True),
        'model_id': fields.many2one('ir.model', 'Model', ondelete='set null', required=True),
        'model': fields.related('model_id', 'model', type='char', string='Model', readonly=True),
        'filter_id': fields.many2one('ir.filters', 'Filter', domain="[('model_id', '=', model)]"),
        'google_drive_template_url': fields.char('Template URL', required=True, size=1024),
        'google_drive_resource_id': fields.function(_resource_get, type="char", string='Resource Id'),
        'google_drive_client_id': fields.function(_client_id_get, type="char", string='Google Client '),
        'name_template': fields.char('Google Drive Name Pattern', help='Choose how the new google drive will be named, on google side. Eg. gdoc_%(field_name)s', required=True),
        'active': fields.boolean('Active'),
    }

    def onchange_model_id(self, cr, uid, ids, model_id, context=None):
        """Keep the related 'model' char field (and filter) in sync with model_id."""
        res = {}
        if model_id:
            model = self.pool['ir.model'].browse(cr, uid, model_id, context=context)
            res['value'] = {'model': model.model}
        else:
            res['value'] = {'filter_id': False, 'model': False}
        return res

    _defaults = {
        'name_template': 'Document %(name)s',
        'active': True,
    }

    def _check_model_id(self, cr, uid, ids, context=None):
        """Constraint: the filter (if any) must target the same model as model_id."""
        config_id = self.browse(cr, uid, ids[0], context=context)
        if config_id.filter_id and config_id.model_id.model != config_id.filter_id.model_id:
            return False
        return True

    _constraints = [
        (_check_model_id, 'Model of selected filter is not matching with model of current template.', ['model_id', 'filter_id']),
    ]

    def get_google_scope(self):
        """Return the OAuth scopes required for Drive access."""
        return 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.file'
class base_config_settings(osv.TransientModel):
    """Settings wizard extension exposing the Google Drive authorization flow."""
    _inherit = "base.config.settings"

    def _get_drive_uri(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field getter: reuse the default-computed authorization URI
        for every wizard record."""
        return {
            wizard_id: self.default_get(cr, uid, ['google_drive_uri']).get('google_drive_uri')
            for wizard_id in ids
        }

    def _get_wizard_ids(self, cr, uid, ids, context=None):
        """Store trigger: recompute google_drive_uri on every wizard when the
        Google client id / redirect uri config parameters change."""
        result = []
        if any(rec.key in ['google_drive_client_id', 'google_redirect_uri'] for rec in self.browse(cr, uid, ids, context=context)):
            result.extend(self.pool['base.config.settings'].search(cr, uid, [], context=context))
        return result

    _columns = {
        'google_drive_authorization_code': fields.char('Authorization Code'),
        'google_drive_uri': fields.function(_get_drive_uri, string='URI', help="The URL to generate the authorization code from Google", type="char", store={
            'ir.config_parameter': (_get_wizard_ids, None, 20),
        }),  # TODO: 1. in master, remove the store, there is no reason for this field to be stored. It's just a dynamic link.
        # TODO: 2. when converted to the new API, the code to get the default value can be moved to the compute method directly, and the default value can be removed
        # the only reason the default value is defined is because function fields are not computed in draft mode in the old API.
    }

    _defaults = {
        'google_drive_uri': lambda s, cr, uid, c: s.pool['google.service']._get_google_token_uri(cr, uid, 'drive', scope=s.pool['google.drive.config'].get_google_scope(), context=c),
        'google_drive_authorization_code': lambda s, cr, uid, c: s.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'google_drive_authorization_code', context=c),
    }

    def set_google_authorization_code(self, cr, uid, ids, context=None):
        """Persist a newly entered authorization code and exchange it for a
        refresh token (only when the code actually changed)."""
        ir_config_param = self.pool['ir.config_parameter']
        config = self.browse(cr, uid, ids[0], context)
        auth_code = config.google_drive_authorization_code
        if auth_code and auth_code != ir_config_param.get_param(cr, uid, 'google_drive_authorization_code', context=context):
            refresh_token = self.pool['google.service'].generate_refresh_token(cr, uid, 'drive', config.google_drive_authorization_code, context=context)
            ir_config_param.set_param(cr, uid, 'google_drive_authorization_code', auth_code, groups=['base.group_system'])
            ir_config_param.set_param(cr, uid, 'google_drive_refresh_token', refresh_token, groups=['base.group_system'])
| agpl-3.0 |
Eigenstate/dabble | convert_step5_to_dabble.py | 2 | 1142 | #!/usr/bin/env python
"""
Converts a step5_assembly.{psf,pdb} to a mae file appropriate
for membrane input to dabble
"""
from __future__ import print_function
import os
from vmd import atomsel, molecule
thedir = os.path.abspath(input("Which directory contains step5_assembly.{psf,crd}? > "))
if not os.path.isdir(thedir):
raise ValueError("%s not a valid directory" % thedir)
crd = os.path.join(thedir, "step5_assembly.crd")
psf = os.path.join(thedir, "step5_assembly.psf")
if not os.path.isfile(crd):
raise ValueError("No pdb file in directory!")
if not os.path.isfile(psf):
raise ValueError("No psf file in directory!")
molid = molecule.load('psf', psf, 'cor', crd)
xs = atomsel().get('x')
ys = atomsel().get('y')
zs = atomsel().get('z')
# 0.5A buffer to make it tile nicer
molecule.set_periodic(molid=molid,
a=max(xs)-min(xs)-8.0,
b=max(ys)-min(ys)-8.0,
c=max(zs)-min(zs)-8.0,
alpha=90., beta=90., gamma=90.)
outfile = os.path.join(thedir, "step5_assembly_dabble.mae")
molecule.write(molid=molid, filetype='mae', filename=outfile)
| gpl-2.0 |
Deepakkothandan/ansible | lib/ansible/plugins/cliconf/iosxr.py | 58 | 2900 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network_common import to_list
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
    """Cliconf plugin implementing the CLI transport primitives for
    Cisco IOS-XR devices (used by the network_cli connection)."""

    def get_device_info(self):
        """Collect basic device facts by parsing ``show version brief``.

        Returns a dict with network_os and, when the output matches,
        network_os_version / image / model / hostname.
        """
        device_info = {}
        device_info['network_os'] = 'iosxr'
        reply = self.get(b'show version brief')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Version (\S+)$', data, re.M)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'image file is "(.+)"', data)
        if match:
            device_info['network_os_image'] = match.group(1)

        match = re.search(r'^Cisco (.+) \(revision', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'^(.+) uptime', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_config(self, source='running'):
        """Return the device configuration; only 'running' is supported."""
        lookup = {'running': 'running-config'}
        if source not in lookup:
            return self.invalid_params("fetching configuration from %s is not supported" % source)
        return self.send_command(to_bytes(b'show %s' % lookup[source], errors='surrogate_or_strict'))

    def edit_config(self, command):
        """Enter configuration mode, apply *command* (str or list of lines),
        then leave with ``end``."""
        for cmd in chain([b'configure'], to_list(command), [b'end']):
            self.send_command(cmd)

    def get(self, *args, **kwargs):
        """Run an arbitrary command; thin alias for send_command()."""
        return self.send_command(*args, **kwargs)

    def commit(self, comment=None):
        """Commit the candidate configuration, optionally with a comment.

        Bug fix: the original used ``b'commit comment {0}'.format(comment)``,
        but bytes objects have no ``format`` method on Python 3, so any
        commented commit raised AttributeError. Format the text first and
        encode it with to_bytes.
        """
        if comment:
            command = to_bytes('commit comment {0}'.format(comment),
                               errors='surrogate_or_strict')
        else:
            command = b'commit'
        self.send_command(command)

    def discard_changes(self):
        """Abort the configuration session, discarding uncommitted changes."""
        self.send_command(b'abort')

    def get_capabilities(self):
        """Return a JSON document describing supported RPCs, the API type
        and the collected device facts."""
        result = {}
        result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes']
        result['network_api'] = 'cliconf'
        result['device_info'] = self.get_device_info()
        return json.dumps(result)
| gpl-3.0 |
melmothx/jsonbot | jsb/plugs/wave/gadget.py | 1 | 4625 | # jsb.plugs.wave/gadget.py
#
#
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.persist import PlugPersist
# Known gadget URLs, persisted across restarts via PlugPersist.
# NOTE(review): the defaults below are (re)assigned on every import, so they
# overwrite any persisted values for these four keys -- confirm intended.
gadgeturls = PlugPersist('gadgeturls')
gadgeturls.data['gadget'] = 'https://jsonbot.appspot.com/gadget.xml'
gadgeturls.data['poll'] = 'https://jsonbot.appspot.com/poll.xml'
gadgeturls.data['iframe'] = 'https://jsonbot.appspot.com/iframe.xml'
gadgeturls.data['loadiframe'] = 'https://jsonbot.appspot.com/loadiframe.xml'
def loadroot(event, url):
    """Attach the gadget at *url* to the wave's root blip.

    Returns True on success; replies with an error and returns False
    when the event carries no root blip.
    """
    blip = event.rootblip
    if not blip:
        event.reply("can't find root blip.")
        return False
    from waveapi import element
    blip.append(element.Gadget(url))
    return True
def load(event, url):
    """Attach the gadget at *url* to the current blip.

    Returns True on success; replies with an error and returns False
    when the event carries no blip.
    """
    blip = event.blip
    if not blip:
        event.reply("can't find root blip.")
        return False
    from waveapi import element
    blip.append(element.Gadget(url))
    return True
def handle_gadgetload(bot, event):
    """gadget-load <gadgetname> -- load a stored gadget into the current blip.

    Only works on the Google Wave transport; looks the name up in the
    persisted gadgeturls table.
    """
    if event.bottype != "wave":
        event.reply("this command only works in google wave.");
        return
    if not event.rest:
        event.missing('<gadgetname>')
        return
    try:
        url = gadgeturls.data[event.rest]
        if load(event, url):
            event.reply('loaded %s' % url)
    except KeyError:
        # Unknown gadget name.
        event.reply("we don't have a url for %s" % event.rest)


cmnds.add("gadget-load", handle_gadgetload, 'USER')
examples.add("gadget-load", "load a gadget into a blip", "gadget-load")
def handle_gadgetloadroot(bot, event):
    """gadget-loadroot <gadgetname> -- load a stored gadget into the ROOT blip.

    Same as gadget-load but targets the wave's root blip.
    """
    if event.bottype != "wave":
        event.reply("this command only works in google wave.");
        return
    if not event.rest:
        event.missing('<gadgetname>')
        return
    try:
        url = gadgeturls.data[event.rest]
        if loadroot(event, url):
            event.reply('loaded %s' % url)
    except KeyError:
        # Unknown gadget name.
        event.reply("we don't have a url for %s" % event.rest)


cmnds.add("gadget-loadroot", handle_gadgetloadroot, 'USER')
examples.add("gadget-loadroot", "load a gadget into the root blip", "gadget-loadroot")
def handle_gadgetiframe(bot, event):
    """gadget-iframe <url> -- wrap *url* in the stored 'loadiframe' gadget
    and load it into the current blip (Google Wave only)."""
    if event.bottype != "wave":
        event.reply("this command only works in google wave.");
        return
    if not event.rest:
        event.missing('<url>')
        return
    try:
        # Pass the target url as a query parameter to the iframe gadget.
        url = gadgeturls.data['loadiframe'] + "?&iframeurl=%s" % event.rest
        event.reply('loading %s' % url)
        load(event, url)
    except KeyError:
        # No 'loadiframe' gadget url is configured.
        event.reply("we don't have a iframe url")


cmnds.add("gadget-iframe", handle_gadgetiframe, 'USER')
examples.add("gadget-iframe", "load a url into a iframe", "gadget-iframe")
def handle_gadgetaddurl(bot, event):
    """gadget-addurl <name> <url> -- store a gadget url under *name*.

    Refuses to overwrite an existing entry so a typo cannot clobber a
    known-good url.
    """
    try:
        (name, url) = event.args
    except ValueError:
        event.missing('<name> <url>')
        return
    # Idiom fix: dict.has_key() is deprecated (removed in Python 3);
    # the `in` operator is equivalent and works on both 2 and 3.
    if name not in gadgeturls.data:
        gadgeturls.data[name] = url
        gadgeturls.save()
    else:
        event.reply("we already have a %s gadget" % name)


cmnds.add("gadget-addurl", handle_gadgetaddurl, 'USER')
examples.add("gadget-addurl", "store a gadget url", "gadget-addurl jsb https://jsonbot.appspot.com/iframe.xml")
def handle_gadgetdelurl(bot, event):
    """gadget-delurl <name> -- remove a stored gadget url.

    Bug fix: the original *assigned* ``gadgeturls.data[name] = url``
    instead of deleting the entry, and demanded two arguments even
    though its own example ("gadget-delurl mygadget") passes one.
    """
    if not event.args:
        event.missing('<name>')
        return
    name = event.args[0]
    try:
        del gadgeturls.data[name]
        gadgeturls.save()
        event.reply("removed %s gadget" % name)
    except KeyError:
        event.reply("we don't have a url for %s" % name)


cmnds.add("gadget-delurl", handle_gadgetdelurl, 'OPER')
examples.add("gadget-delurl", "delete a gadget url", "gadget-delurl mygadget")
def handle_gadgetlist(bot, event):
    """gadget-list -- show every stored gadget name with its url."""
    known = ["%s - %s" % (gname, gurl)
             for gname, gurl in gadgeturls.data.iteritems()]
    event.reply("available gadgets: ", known)


cmnds.add("gadget-list", handle_gadgetlist, 'USER')
examples.add("gadget-list", "list known gadget urls", "gadget-list")
def handle_gadgetconsole(bot, event):
    """gadget-console -- load the JSONBOT console gadget into the wave.

    Also refreshes the wave title from the configured feeds when title
    updates are enabled for this wave.
    """
    if event.bottype != "wave":
        event.reply("this command only works in google wave.");
        return
    wave = event.chan
    if wave.data.feeds and wave.data.dotitle:
        event.set_title("JSONBOT - %s #%s" % (" - ".join(wave.data.feeds), str(wave.data.nrcloned)))
    from waveapi import element
    #url = gadgeturls.data['loadiframe'] + "?&iframeurl=https://jsonbot.appspot.com"
    #event.reply('loading %s' % url)
    event.append("loading ...\n")
    #load(event, "http://jsonbot.appspot.com/iframe.xml")
    # gadget_cache=0 forces Wave to fetch a fresh copy of the gadget xml.
    event.append(
        element.Gadget('http://jsonbot.appspot.com/console.xml?gadget_cache=0'))


cmnds.add("gadget-console", handle_gadgetconsole, 'OPER')
examples.add("gadget-console", "load the console gadget", "gadget-console")
| mit |
tillahoffmann/tensorflow | tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py | 6 | 7314 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SelfAdjointEigTest(test.TestCase):

  def testWrongDimensions(self):
    # self_adjoint_eig requires a tensor of rank >= 2; both a scalar and a
    # vector must be rejected with ValueError.
    for bad_input in (constant_op.constant(1.),
                      constant_op.constant([1., 2.])):
      with self.assertRaises(ValueError):
        linalg_ops.self_adjoint_eig(bad_input)
def SortEigenDecomposition(e, v):
  """Sort eigenvalues ascending along the last axis, permuting the matching
  eigenvector columns of `v` the same way. Inputs with v.ndim < 2 are
  returned unchanged."""
  if v.ndim < 2:
    return e, v
  order = np.argsort(e, -1)
  return np.take(e, order, -1), np.take(v, order, -1)
def NormalizeEigenvectorsPhase(v):
  """Normalizes the phase of the Eigenvectors stored in the columns of `v`.

  (complex) Eigenvectors are only unique up to an arbitrary phase.
  We normalize the vectors such that the first component has phase 0.

  Args:
    v: `np.ndarray` with Eigenvectors as returned from `np.linalg.eigh`.

  Returns:
    `np.ndarray` normalized Eigenvectors.
  """
  first_row = v[..., 0:1, :]
  scale = np.linalg.norm(first_row, axis=-1, keepdims=True)
  reference = v / scale
  return v * reference.conj()
def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):
  """Build a test method for one (dtype, shape, compute_v) configuration.

  The trailing underscores on the parameter names avoid clashes with the
  loop variables used in the __main__ block (late-binding closures).
  """

  def CompareEigenVectors(self, x, y, tol):
    # Eigenvectors are only defined up to phase; normalize before comparing.
    x = NormalizeEigenvectorsPhase(x)
    y = NormalizeEigenvectorsPhase(y)
    self.assertAllClose(x, y, atol=tol, rtol=tol)

  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
    # Flatten the batch dimensions, then compare each decomposition with
    # eigenvalues (and matching eigenvector columns) in canonical order.
    num_batches = int(np.prod(x_e.shape[:-1]))
    n = x_e.shape[-1]
    x_e = np.reshape(x_e, [num_batches] + [n])
    x_v = np.reshape(x_v, [num_batches] + [n, n])
    y_e = np.reshape(y_e, [num_batches] + [n])
    y_v = np.reshape(y_v, [num_batches] + [n, n])
    for i in range(num_batches):
      x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
      y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
      self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
      CompareEigenVectors(self, x_vi, y_vi, tol)

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype
    # Build a random Hermitian (or real symmetric) matrix and tile it
    # across the batch dimensions.
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    if dtype_.is_complex:
      a += 1j * np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    a += np.conj(a.T)
    a = np.tile(a, batch_shape + (1, 1))
    # Looser tolerance for single-precision types.
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      atol = 1e-4
    else:
      atol = 1e-12
    np_e, np_v = np.linalg.eigh(a)
    with self.test_session():
      if compute_v_:
        tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))

        # Check that V*diag(E)*V^T is close to A.
        a_ev = math_ops.matmul(
            math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
            tf_v,
            adjoint_b=True)
        self.assertAllClose(a_ev.eval(), a, atol=atol)

        # Compare to numpy.linalg.eigh.
        CompareEigenDecompositions(self, np_e, np_v,
                                   tf_e.eval(), tf_v.eval(), atol)
      else:
        tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
        self.assertAllClose(
            np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)

  return Test
class SelfAdjointEigGradTest(test.TestCase):
  pass  # Test methods are attached dynamically in the __main__ block below.
def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
  """Build a gradient-check test method for one (dtype, shape, compute_v)
  configuration, comparing theoretical and numerical gradients."""

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype
    # Random Hermitian (or real symmetric) input, tiled across batch dims.
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    if dtype_.is_complex:
      a += 1j * np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    a += np.conj(a.T)
    a = np.tile(a, batch_shape + (1, 1))
    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(np_dtype).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
    # tolerance obtained by looking at actual differences using
    # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      tol = 1e-2
    else:
      tol = 1e-7
    with self.test_session():
      tf_a = constant_op.constant(a)
      if compute_v_:
        tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)

        # (complex) Eigenvectors are only unique up to an arbitrary phase
        # We normalize the vectors such that the first component has phase 0.
        reference = tf_v / linalg_ops.norm(
            tf_v[..., 0:1, :], axis=-1, keep_dims=True)
        tf_v *= math_ops.conj(reference)
        outputs = [tf_e, tf_v]
      else:
        tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
        outputs = [tf_e,]
      for b in outputs:
        # Use a random Hermitian perturbation direction for the check.
        x_init = np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
        if dtype_.is_complex:
          x_init += 1j * np.random.uniform(
              low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
        x_init += np.conj(x_init.T)
        x_init = np.tile(x_init, batch_shape + (1, 1))
        theoretical, numerical = gradient_checker.compute_gradient(
            tf_a,
            tf_a.get_shape().as_list(),
            b,
            b.get_shape().as_list(),
            x_init_value=x_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
if __name__ == '__main__':
  # Attach one generated test method per (compute_v, dtype, size, batch)
  # combination to the test classes defined above.
  for compute_v in [True, False]:
    for dtype in (
        dtypes_lib.float32, dtypes_lib.float64,
        dtypes_lib.complex64, dtypes_lib.complex128):
      for size in 1, 2, 5, 10:
        # Only add batched shapes for small matrices to keep runtime down.
        # (The original guard was `max(size, size) < 10`, a redundant
        # self-max that is just `size < 10`.)
        for batch_dims in [(), (3,)] + [(3, 2)] * (size < 10):
          shape = batch_dims + (size, size)
          name = '%s_%s_%s' % (dtype, '_'.join(map(str, shape)), compute_v)
          setattr(SelfAdjointEigTest, 'testSelfAdjointEig_' + name,
                  _GetSelfAdjointEigTest(dtype, shape, compute_v))
          setattr(SelfAdjointEigGradTest, 'testSelfAdjointEigGrad_' + name,
                  _GetSelfAdjointEigGradTest(dtype, shape, compute_v))
  test.main()
| apache-2.0 |
Metaswitch/horizon | openstack_dashboard/dashboards/project/loadbalancers/forms.py | 45 | 12071 | # Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class UpdatePool(forms.SelfHandlingForm):
    """Form to edit an existing LBaaS pool (name, description,
    load-balancing method and admin state)."""
    name = forms.CharField(max_length=80, label=_("Name"))
    # Read-only: identifies the pool being edited.
    pool_id = forms.CharField(label=_("ID"),
                              widget=forms.TextInput(
                                  attrs={'readonly': 'readonly'}))
    description = forms.CharField(required=False,
                                  max_length=80, label=_("Description"))
    lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"))

    failure_url = 'horizon:project:loadbalancers:index'

    def __init__(self, request, *args, **kwargs):
        super(UpdatePool, self).__init__(request, *args, **kwargs)
        # The three methods supported by neutron LBaaS v1.
        lb_method_choices = [('ROUND_ROBIN', 'ROUND_ROBIN'),
                             ('LEAST_CONNECTIONS', 'LEAST_CONNECTIONS'),
                             ('SOURCE_IP', 'SOURCE_IP')]
        self.fields['lb_method'].choices = lb_method_choices

    def handle(self, request, context):
        # ChoiceField delivers the boolean as the string 'True'/'False'.
        context['admin_state_up'] = (context['admin_state_up'] == 'True')
        try:
            data = {'pool': {'name': context['name'],
                             'description': context['description'],
                             'lb_method': context['lb_method'],
                             'admin_state_up': context['admin_state_up'],
                             }}
            pool = api.lbaas.pool_update(request, context['pool_id'], **data)
            msg = _('Pool %s was successfully updated.') % context['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return pool
        except Exception:
            msg = _('Failed to update pool %s') % context['name']
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
class UpdateVip(forms.SelfHandlingForm):
    """Form to edit an existing LBaaS VIP (pool assignment, session
    persistence, connection limit and admin state)."""
    name = forms.CharField(max_length=80, label=_("Name"))
    # Read-only: identifies the VIP being edited.
    vip_id = forms.CharField(label=_("ID"),
                             widget=forms.TextInput(
                                 attrs={'readonly': 'readonly'}))
    description = forms.CharField(required=False,
                                  max_length=80, label=_("Description"))
    pool_id = forms.ChoiceField(label=_("Pool"))
    session_persistence = forms.ChoiceField(
        required=False, initial={}, label=_("Session Persistence"))
    cookie_name = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Cookie Name"),
        help_text=_("Required for APP_COOKIE persistence;"
                    " Ignored otherwise."))
    connection_limit = forms.IntegerField(
        min_value=-1, label=_("Connection Limit"),
        help_text=_("Maximum number of connections allowed "
                    "for the VIP or '-1' if the limit is not set"))
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"))

    failure_url = 'horizon:project:loadbalancers:index'

    def __init__(self, request, *args, **kwargs):
        super(UpdateVip, self).__init__(request, *args, **kwargs)
        pool_id_choices = []
        try:
            tenant_id = request.user.tenant_id
            pools = api.lbaas.pool_list(request, tenant_id=tenant_id)
        except Exception:
            pools = []
            exceptions.handle(request,
                              _('Unable to retrieve pools list.'))
        pools = sorted(pools,
                       key=lambda pool: pool.name)
        for p in pools:
            # Offer pools without a VIP, plus the pool currently bound
            # to this VIP (so the existing assignment stays selectable).
            if (p.vip_id is None) or (p.id == kwargs['initial']['pool_id']):
                pool_id_choices.append((p.id, p.name))
        self.fields['pool_id'].choices = pool_id_choices

        session_persistence_choices = []
        for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'):
            session_persistence_choices.append((mode, mode))
        # Empty value means persistence disabled.
        session_persistence_choices.append(('', _('No session persistence')))
        self.fields[
            'session_persistence'].choices = session_persistence_choices

    def clean(self):
        # APP_COOKIE persistence is meaningless without a cookie name.
        cleaned_data = super(UpdateVip, self).clean()
        persistence = cleaned_data.get('session_persistence')
        if (persistence == 'APP_COOKIE' and
                not cleaned_data.get('cookie_name')):
            msg = _('Cookie name is required for APP_COOKIE persistence.')
            self._errors['cookie_name'] = self.error_class([msg])
        return cleaned_data

    def handle(self, request, context):
        # ChoiceField delivers the boolean as the string 'True'/'False'.
        context['admin_state_up'] = (context['admin_state_up'] == 'True')
        # Convert the flat form fields into the nested dict the API expects.
        if context['session_persistence']:
            stype = context['session_persistence']
            if stype == 'APP_COOKIE':
                cookie = context['cookie_name']
                context['session_persistence'] = {'type': stype,
                                                  'cookie_name': cookie}
            else:
                context['session_persistence'] = {'type': stype}
        else:
            context['session_persistence'] = {}
        try:
            data = {'vip': {'name': context['name'],
                            'description': context['description'],
                            'pool_id': context['pool_id'],
                            'session_persistence':
                                context['session_persistence'],
                            'connection_limit': context['connection_limit'],
                            'admin_state_up': context['admin_state_up'],
                            }}
            vip = api.lbaas.vip_update(request, context['vip_id'], **data)
            msg = _('VIP %s was successfully updated.') % context['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return vip
        except Exception:
            msg = _('Failed to update VIP %s') % context['name']
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
class UpdateMember(forms.SelfHandlingForm):
    """Self-handling form that edits an existing load-balancer pool member
    through the LBaaS API.
    """

    # Read-only identifier of the member being edited.
    member_id = forms.CharField(label=_("ID"),
                                widget=forms.TextInput(
                                    attrs={'readonly': 'readonly'}))
    pool_id = forms.ChoiceField(label=_("Pool"))
    weight = forms.IntegerField(max_value=256, min_value=0, label=_("Weight"),
                                help_text=_("Relative part of requests this "
                                            "pool member serves compared to others"))
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"))
    failure_url = 'horizon:project:loadbalancers:index'

    def __init__(self, request, *args, **kwargs):
        """Populate the pool choices with the tenant's pools, sorted by name."""
        super(UpdateMember, self).__init__(request, *args, **kwargs)
        pool_id_choices = []
        try:
            tenant_id = request.user.tenant_id
            pools = api.lbaas.pool_list(request, tenant_id=tenant_id)
        except Exception:
            # Best-effort: fall back to an empty list and surface the error.
            pools = []
            exceptions.handle(request,
                              _('Unable to retrieve pools list.'))
        pools = sorted(pools,
                       key=lambda pool: pool.name)
        for p in pools:
            pool_id_choices.append((p.id, p.name))
        self.fields['pool_id'].choices = pool_id_choices

    def handle(self, request, context):
        """Update the member; return it on success, or redirect on failure."""
        # ChoiceField delivers the admin state as a string; normalize to bool.
        context['admin_state_up'] = (context['admin_state_up'] == 'True')
        try:
            data = {'member': {'pool_id': context['pool_id'],
                               'weight': context['weight'],
                               'admin_state_up': context['admin_state_up']}}
            member = api.lbaas.member_update(request,
                                             context['member_id'], **data)
            msg = _('Member %s was successfully updated.')\
                % context['member_id']
            LOG.debug(msg)
            messages.success(request, msg)
            return member
        except Exception:
            msg = _('Failed to update member %s') % context['member_id']
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
class UpdateMonitor(forms.SelfHandlingForm):
    """Self-handling form that edits an existing LBaaS health monitor."""

    # Read-only identifier of the monitor being edited.
    monitor_id = forms.CharField(label=_("ID"),
                                 widget=forms.TextInput(
                                     attrs={'readonly': 'readonly'}))
    delay = forms.IntegerField(
        min_value=1,
        label=_("Delay"),
        help_text=_("The minimum time in seconds between regular checks "
                    "of a member. It must be greater than or equal to "
                    "timeout"))
    timeout = forms.IntegerField(
        min_value=1,
        label=_("Timeout"),
        help_text=_("The maximum time in seconds for a monitor to wait "
                    "for a reply. It must be less than or equal to delay"))
    max_retries = forms.IntegerField(
        max_value=10, min_value=1,
        label=_("Max Retries (1~10)"),
        help_text=_("Number of permissible failures before changing "
                    "the status of member to inactive"))
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"))
    failure_url = 'horizon:project:loadbalancers:index'

    def __init__(self, request, *args, **kwargs):
        super(UpdateMonitor, self).__init__(request, *args, **kwargs)

    def clean(self):
        """Ensure ``delay`` >= ``timeout`` when both values are present."""
        cleaned_data = super(UpdateMonitor, self).clean()
        delay = cleaned_data.get('delay')
        timeout = cleaned_data.get('timeout')
        # Either value may be None when its own field validation already
        # failed; comparing None with an int raises TypeError on Python 3
        # and yields a meaningless result on Python 2, so only compare
        # when both values are available.
        if delay is not None and timeout is not None and delay < timeout:
            msg = _('Delay must be greater than or equal to timeout')
            self._errors['delay'] = self.error_class([msg])
        return cleaned_data

    def handle(self, request, context):
        """Update the health monitor; redirect to the index on failure."""
        # ChoiceField delivers the admin state as a string; normalize to bool.
        context['admin_state_up'] = (context['admin_state_up'] == 'True')
        try:
            data = {'health_monitor': {
                'delay': context['delay'],
                'timeout': context['timeout'],
                'max_retries': context['max_retries'],
                'admin_state_up': context['admin_state_up']}}
            monitor = api.lbaas.pool_health_monitor_update(
                request, context['monitor_id'], **data)
            msg = _('Health monitor %s was successfully updated.')\
                % context['monitor_id']
            LOG.debug(msg)
            messages.success(request, msg)
            return monitor
        except Exception:
            msg = _('Failed to update health monitor %s')\
                % context['monitor_id']
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
| apache-2.0 |
zhouzhenghui/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/utils/xmltools.py | 259 | 7358 | """Helper functions for XML.
This module has misc. helper functions for working with XML DOM nodes."""
import re
from compat import *
import os
# Jython reports os.name == "java"; everywhere else use the stdlib minidom
# parser, while on Jython fall back to the JAXP DocumentBuilder.
if os.name != "java":
    from xml.dom import minidom
    from xml.sax import saxutils

    def parseDocument(s):
        # Parse an XML document from a string using minidom.
        return minidom.parseString(s)
else:
    from javax.xml.parsers import *
    import java
    # One shared, reusable JAXP document builder for the process.
    builder = DocumentBuilderFactory.newInstance().newDocumentBuilder()

    def parseDocument(s):
        # Parse an XML document from a string via a Java byte stream.
        stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes())
        return builder.parse(stream)
def parseAndStripWhitespace(s):
    """Parse the XML string *s* and strip inter-element whitespace.

    Any parsing problem is re-raised as SyntaxError so callers only need
    to handle one exception type.
    """
    try:
        element = parseDocument(s).documentElement
    except BaseException, e:  # Python 2 syntax; this module predates Python 3
        raise SyntaxError(str(e))
    stripWhitespace(element)
    return element
#Goes through a DOM tree and removes whitespace besides child elements,
#as long as this whitespace is correctly tab-ified
def stripWhitespace(element, tab=0):
    """Recursively remove pretty-printing whitespace from *element*.

    The document must be indented with newlines and tabs; ``tab`` is the
    current nesting depth.  Any deviation from the expected indentation
    raises SyntaxError.
    """
    element.normalize()
    # Expected text between the last child and the closing tag ...
    lastSpacer = "\n" + ("\t"*tab)
    # ... and between sibling children, one nesting level deeper.
    spacer = lastSpacer + "\t"
    #Zero children aren't allowed (i.e. <empty/>)
    #This makes writing output simpler, and matches Canonical XML
    if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython
        raise SyntaxError("Empty XML elements not allowed")
    #If there's a single child, it must be text context
    if element.childNodes.length==1:
        if element.firstChild.nodeType == element.firstChild.TEXT_NODE:
            #If it's an empty element, remove
            if element.firstChild.data == lastSpacer:
                element.removeChild(element.firstChild)
            return
        #If not text content, give an error
        elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE:
            raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
        else:
            raise SyntaxError("Unexpected node type in XML document")
    #Otherwise there's multiple child element
    child = element.firstChild
    while child:
        if child.nodeType == child.ELEMENT_NODE:
            stripWhitespace(child, tab+1)
            child = child.nextSibling
        elif child.nodeType == child.TEXT_NODE:
            # Text between children must exactly match the expected
            # indentation; the node before the closing tag is one level
            # shallower than the others.
            if child == element.lastChild:
                if child.data != lastSpacer:
                    raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
            elif child.data != spacer:
                raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
            # Grab the sibling before removal invalidates the link.
            next = child.nextSibling
            element.removeChild(child)
            child = next
        else:
            raise SyntaxError("Unexpected node type in XML document")
def checkName(element, name):
    """Verify that *element* is an element node named *name*.

    A ``name`` of None only checks the node type.  Raises SyntaxError on
    any mismatch.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Missing element: '%s'" % name)
    # PEP 8: identity comparison with None, not equality.
    if name is None:
        return
    if element.tagName != name:
        raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName))

def getChild(element, index, name=None):
    """Return child number *index* of *element*, checking its name.

    Raises SyntaxError if the child is missing or misnamed.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getChild()")
    child = element.childNodes.item(index)
    # PEP 8: identity comparison with None, not equality.
    if child is None:
        raise SyntaxError("Missing child: '%s'" % name)
    checkName(child, name)
    return child
def getChildIter(element, index):
    """Return a cursor over ``element.childNodes`` starting at *index*.

    The cursor's ``next()`` yields None once the children are exhausted,
    and ``checkEnd()`` raises SyntaxError if children remain unconsumed.
    """
    class _Cursor:
        def __init__(self, node, pos):
            self.element = node
            self.index = pos

        def next(self):
            if self.index >= len(self.element.childNodes):
                return None
            node = self.element.childNodes.item(self.index)
            self.index += 1
            return node

        def checkEnd(self):
            if self.index != len(self.element.childNodes):
                raise SyntaxError("Too many elements under: '%s'" % self.element.tagName)

    return _Cursor(element, index)
def getChildOrNone(element, index):
    """Return child number *index* of *element*, or None when absent."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getChild()")
    return element.childNodes.item(index)
def getLastChild(element, index, name=None):
    """Return child number *index*, asserting it is the last child.

    Raises SyntaxError if the child is missing, not the last child of
    *element*, or misnamed.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getLastChild()")
    child = element.childNodes.item(index)
    # PEP 8: identity comparison with None, not equality.
    if child is None:
        raise SyntaxError("Missing child: '%s'" % name)
    if child != element.lastChild:
        raise SyntaxError("Too many elements under: '%s'" % element.tagName)
    checkName(child, name)
    return child
#Regular expressions for syntax-checking attribute and element content
# NOTE(review): these patterns are plain (non-raw) string literals, so
# escapes such as \d and \Z rely on Python passing unknown escape
# sequences through unchanged; raw strings (r"...") would be the safer
# spelling.  \Z anchors each match at the end of the string.
nsRegEx = "http://trevp.net/cryptoID\Z"
# Four dot-separated groups of five characters drawn from [a-km-z3-9].
cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z"
urlRegEx = "http(s)?://.{1,100}\Z"
# 27 base64 characters plus one '=' pad: a base64-encoded SHA-1 digest.
sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z"
base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z"
# An ordered subset of the digits 0-9, each appearing at most once.
certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z"
keyRegEx = "[A-Z]\Z"
# An ordered subset of the letters A-Z, each appearing at most once.
keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z"
# ISO-8601 UTC timestamp, e.g. 2005-04-24T12:00:00Z
dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z"
shortStringRegEx = ".{1,100}\Z"
exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z"
notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1
booleanRegEx = "(true)|(false)"
def getReqAttribute(element, attrName, regEx=""):
    """Fetch required attribute *attrName*, validate it against *regEx*,
    and remove it from the element.

    Raises SyntaxError when the attribute is absent or malformed.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getReqAttribute()")
    value = element.getAttribute(attrName)
    if value:
        if re.match(regEx, value):
            element.removeAttribute(attrName)
            #de-unicode it; this is needed for bsddb, for example
            return str(value)
        raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value))
    raise SyntaxError("Missing Attribute: " + attrName)
def getAttribute(element, attrName, regEx=""):
    """Fetch optional attribute *attrName*; validate and remove it when
    present.

    Returns the empty string when the attribute is absent.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getAttribute()")
    value = element.getAttribute(attrName)
    if not value:
        return str(value)
    if not re.match(regEx, value):
        raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value))
    element.removeAttribute(attrName)
    #de-unicode it; this is needed for bsddb, for example
    return str(value)
def checkNoMoreAttributes(element):
    """Raise SyntaxError if *element* still carries any attributes.

    Used after the expected attributes have been consumed (and removed)
    by getAttribute/getReqAttribute.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in checkNoMoreAttributes()")
    if element.attributes.length != 0:
        raise SyntaxError("Extra attributes on '%s'" % element.tagName)
def getText(element, regEx=""):
    """Return the text content of *element*, validated against *regEx*.

    Raises SyntaxError when the element is empty, its first child is not
    a text node, or the text does not match *regEx*.
    """
    textNode = element.firstChild
    # PEP 8: identity comparison with None, not equality.
    if textNode is None:
        raise SyntaxError("Empty element '%s'" % element.tagName)
    if textNode.nodeType != textNode.TEXT_NODE:
        raise SyntaxError("Non-text node: '%s'" % element.tagName)
    if not re.match(regEx, textNode.data):
        raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data))
    return str(textNode.data) #de-unicode it; this is needed for bsddb, for example
#Function for adding tabs to a string
def indent(s, steps, ch="\t"):
    """Prefix every line of *s* with *steps* copies of *ch*.

    A trailing newline is preserved without adding a dangling indent
    after it.
    """
    tabs = ch * steps
    # Indent the first line, then every line that follows a newline.
    indented = tabs + s.replace("\n", "\n" + tabs)
    if tabs and s.endswith("\n"):
        # The replace above also indented the (empty) text after the final
        # newline; trim that dangling prefix.  The 'tabs' guard fixes the
        # steps == 0 case, where the original slice s[:-0] wrongly
        # produced an empty string.
        indented = indented[:-len(tabs)]
    return indented
def escape(s):
    """Escape the XML special characters '&', '<' and '>' in *s*."""
    return saxutils.escape(s)
| apache-2.0 |
chhao91/QGIS | python/plugins/processing/algs/gdal/information.py | 11 | 3028 | # -*- coding: utf-8 -*-
"""
***************************************************************************
information.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputHTML
from processing.algs.gdal.GdalUtils import GdalUtils
class information(GdalAlgorithm):
    """Processing algorithm wrapping ``gdalinfo`` to report raster
    layer metadata as an HTML output."""

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    NOGCP = 'NOGCP'
    NOMETADATA = 'NOMETADATA'

    def commandLineName(self):
        return "gdalorg:rasterinfo"

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Information')
        self.group, self.i18n_group = self.trAlgorithm('[GDAL] Miscellaneous')
        self.addParameter(ParameterRaster(information.INPUT,
                                          self.tr('Input layer'), False))
        self.addParameter(ParameterBoolean(information.NOGCP,
                                           self.tr('Suppress GCP info'), False))
        self.addParameter(ParameterBoolean(information.NOMETADATA,
                                           self.tr('Suppress metadata info'), False))
        self.addOutput(OutputHTML(information.OUTPUT,
                                  self.tr('Layer information')))

    def getConsoleCommands(self):
        """Build the gdalinfo command line from the parameter values."""
        arguments = []
        if self.getParameterValue(information.NOGCP):
            arguments.append('-nogcp')
        if self.getParameterValue(information.NOMETADATA):
            arguments.append('-nomd')
        arguments.append(self.getParameterValue(information.INPUT))
        return ['gdalinfo', GdalUtils.escapeAndJoin(arguments)]

    def processAlgorithm(self, progress):
        """Run gdalinfo and write its console output as an HTML <pre> block."""
        GdalUtils.runGdal(self.getConsoleCommands(), progress)
        output = self.getOutputValue(information.OUTPUT)
        # Context manager ensures the file is closed even if a write fails
        # (the original leaked the handle on exception).
        with open(output, 'w') as f:
            f.write('<pre>')
            # Skip the first console line (the command echo).
            for s in GdalUtils.getConsoleOutput()[1:]:
                f.write(unicode(s))
            f.write('</pre>')
| gpl-2.0 |
ShineFan/odoo | addons/purchase/__init__.py | 439 | 1185 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase
import partner
import stock
import wizard
import report
# NOTE(review): 'stock' is imported twice (also three lines above); the
# second import is a harmless no-op and one of the two could be removed.
import stock
import company
import edi
import res_config

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mmbtba/odoo | addons/account/project/wizard/account_analytic_inverted_balance_report.py | 378 | 2045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_inverted_balance(osv.osv_memory):
    """Transient wizard model that collects the reporting period for the
    inverted analytic balance report."""

    _name = 'account.analytic.inverted.balance'
    _description = 'Account Analytic Inverted Balance'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
    }
    _defaults = {
        # Default period: January 1st of the current year through today.
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d')
    }

    def check_report(self, cr, uid, ids, context=None):
        """Build and return the report action for the selected analytic
        accounts and the dates entered in the wizard."""
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'account.analytic.account',
            'form': data
        }
        datas['form']['active_ids'] = context.get('active_ids', False)
        return self.pool['report'].get_action(cr, uid, [], 'account.report_invertedanalyticbalance', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ryangallen/django | tests/pagination/tests.py | 266 | 13459 | from __future__ import unicode_literals
import unittest
from datetime import datetime
from django.core.paginator import (
EmptyPage, InvalidPage, PageNotAnInteger, Paginator,
)
from django.test import TestCase
from django.utils import six
from .custom import ValidAdjacentNumsPaginator
from .models import Article
class PaginationTests(unittest.TestCase):
    """
    Tests for the Paginator and Page classes.
    """

    def check_paginator(self, params, output):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that its attributes match the passed output.
        """
        count, num_pages, page_range = output
        paginator = Paginator(*params)
        self.check_attribute('count', paginator, count, params)
        self.check_attribute('num_pages', paginator, num_pages, params)
        # page_range is an iterator; coerce to a list for comparison.
        self.check_attribute('page_range', paginator, page_range, params, coerce=list)

    def check_attribute(self, name, paginator, expected, params, coerce=None):
        """
        Helper method that checks a single attribute and gives a nice error
        message upon test failure.
        """
        got = getattr(paginator, name)
        if coerce is not None:
            got = coerce(got)
        self.assertEqual(expected, got,
            "For '%s', expected %s but got %s. Paginator parameters were: %s"
            % (name, expected, got, params))

    def test_paginator(self):
        """
        Tests the paginator attributes using varying inputs.
        """
        nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        ten = nine + [10]
        eleven = ten + [11]
        tests = (
            # Each item is two tuples:
            # First tuple is Paginator parameters - object_list, per_page,
            # orphans, and allow_empty_first_page.
            # Second tuple is resulting Paginator attributes - count,
            # num_pages, and page_range.
            # Ten items, varying orphans, no empty first page.
            ((ten, 4, 0, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, False), (10, 2, [1, 2])),
            ((ten, 4, 5, False), (10, 2, [1, 2])),
            ((ten, 4, 6, False), (10, 1, [1])),
            # Ten items, varying orphans, allow empty first page.
            ((ten, 4, 0, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, True), (10, 2, [1, 2])),
            ((ten, 4, 5, True), (10, 2, [1, 2])),
            ((ten, 4, 6, True), (10, 1, [1])),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1, [1])),
            (([1], 4, 1, False), (1, 1, [1])),
            (([1], 4, 2, False), (1, 1, [1])),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1, [1])),
            (([1], 4, 1, True), (1, 1, [1])),
            (([1], 4, 2, True), (1, 1, [1])),
            # Zero items, varying orphans, no empty first page.
            (([], 4, 0, False), (0, 0, [])),
            (([], 4, 1, False), (0, 0, [])),
            (([], 4, 2, False), (0, 0, [])),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 1, [1])),
            (([], 4, 1, True), (0, 1, [1])),
            (([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
            (([], 1, 0, True), (0, 1, [1])),
            (([], 1, 0, False), (0, 0, [])),
            (([1], 2, 0, True), (1, 1, [1])),
            ((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
            (([1], 1, 0, True), (1, 1, [1])),
            (([1, 2], 2, 0, True), (2, 1, [1])),
            ((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
            (([1, 2], 1, 0, True), (2, 2, [1, 2])),
            (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
            ((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
            (([1, 2], 1, 1, True), (2, 1, [1])),
            (([1, 2, 3], 2, 1, True), (3, 1, [1])),
            ((eleven, 10, 1, True), (11, 1, [1])),
            # Non-integer inputs
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
        )
        for params, output in tests:
            self.check_paginator(params, output)

    def test_invalid_page_number(self):
        """
        Tests that invalid page numbers result in the correct exception being
        raised.
        """
        paginator = Paginator([1, 2, 3], 2)
        self.assertRaises(InvalidPage, paginator.page, 3)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, None)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, 'x')
        # With no content and allow_empty_first_page=True, 1 is a valid page number
        paginator = Paginator([], 2)
        self.assertEqual(paginator.validate_number(1), 1)

    def test_paginate_misc_classes(self):
        # Non-sequence object exposing only a count() method.
        class CountContainer(object):
            def count(self):
                return 42
        # Paginator can be passed other objects with a count() method.
        paginator = Paginator(CountContainer(), 10)
        self.assertEqual(42, paginator.count)
        self.assertEqual(5, paginator.num_pages)
        self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))

        # Paginator can be passed other objects that implement __len__.
        class LenContainer(object):
            def __len__(self):
                return 42
        paginator = Paginator(LenContainer(), 10)
        self.assertEqual(42, paginator.count)
        self.assertEqual(5, paginator.num_pages)
        self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))

    def check_indexes(self, params, page_num, indexes):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that the start and end indexes of the passed
        page_num match those given as a 2-tuple in indexes.
        """
        paginator = Paginator(*params)
        if page_num == 'first':
            page_num = 1
        elif page_num == 'last':
            page_num = paginator.num_pages
        page = paginator.page(page_num)
        start, end = indexes
        msg = ("For %s of page %s, expected %s but got %s."
            " Paginator parameters were: %s")
        self.assertEqual(start, page.start_index(),
            msg % ('start index', page_num, start, page.start_index(), params))
        self.assertEqual(end, page.end_index(),
            msg % ('end index', page_num, end, page.end_index(), params))

    def test_page_indexes(self):
        """
        Tests that paginator pages have the correct start and end indexes.
        """
        ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        tests = (
            # Each item is three tuples:
            # First tuple is Paginator parameters - object_list, per_page,
            # orphans, and allow_empty_first_page.
            # Second tuple is the start and end indexes of the first page.
            # Third tuple is the start and end indexes of the last page.
            # Ten items, varying per_page, no orphans.
            ((ten, 1, 0, True), (1, 1), (10, 10)),
            ((ten, 2, 0, True), (1, 2), (9, 10)),
            ((ten, 3, 0, True), (1, 3), (10, 10)),
            ((ten, 5, 0, True), (1, 5), (6, 10)),
            # Ten items, varying per_page, with orphans.
            ((ten, 1, 1, True), (1, 1), (9, 10)),
            ((ten, 1, 2, True), (1, 1), (8, 10)),
            ((ten, 3, 1, True), (1, 3), (7, 10)),
            ((ten, 3, 2, True), (1, 3), (7, 10)),
            ((ten, 3, 4, True), (1, 3), (4, 10)),
            ((ten, 5, 1, True), (1, 5), (6, 10)),
            ((ten, 5, 2, True), (1, 5), (6, 10)),
            ((ten, 5, 5, True), (1, 10), (1, 10)),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1), (1, 1)),
            (([1], 4, 1, False), (1, 1), (1, 1)),
            (([1], 4, 2, False), (1, 1), (1, 1)),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1), (1, 1)),
            (([1], 4, 1, True), (1, 1), (1, 1)),
            (([1], 4, 2, True), (1, 1), (1, 1)),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 0), (0, 0)),
            (([], 4, 1, True), (0, 0), (0, 0)),
            (([], 4, 2, True), (0, 0), (0, 0)),
        )
        for params, first, last in tests:
            self.check_indexes(params, 'first', first)
            self.check_indexes(params, 'last', last)

        # When no items and no empty first page, we should get EmptyPage error.
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)

    def test_page_sequence(self):
        """
        Tests that a paginator page acts like a standard sequence.
        """
        eleven = 'abcdefghijk'
        page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
        self.assertEqual(len(page2), 6)
        self.assertIn('k', page2)
        self.assertNotIn('a', page2)
        self.assertEqual(''.join(page2), 'fghijk')
        self.assertEqual(''.join(reversed(page2)), 'kjihgf')

    def test_get_page_hook(self):
        """
        Tests that a Paginator subclass can use the ``_get_page`` hook to
        return an alternative to the standard Page class.
        """
        eleven = 'abcdefghijk'
        paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
        page1 = paginator.page(1)
        page2 = paginator.page(2)
        self.assertIsNone(page1.previous_page_number())
        self.assertEqual(page1.next_page_number(), 2)
        self.assertEqual(page2.previous_page_number(), 1)
        self.assertIsNone(page2.next_page_number())

    def test_page_range_iterator(self):
        """
        Paginator.page_range should be an iterator.
        """
        self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(six.moves.range(0)))
class ModelPaginationTests(TestCase):
    """
    Test pagination with Django model instances
    """

    def setUp(self):
        # Prepare a list of objects for pagination.
        for x in range(1, 10):
            a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
            a.save()

    def test_first_page(self):
        """The first page exposes the expected objects and navigation state."""
        paginator = Paginator(Article.objects.all(), 5)
        p = paginator.page(1)
        self.assertEqual("<Page 1 of 2>", six.text_type(p))
        self.assertQuerysetEqual(p.object_list, [
            "<Article: Article 1>",
            "<Article: Article 2>",
            "<Article: Article 3>",
            "<Article: Article 4>",
            "<Article: Article 5>"
            ],
            ordered=False
        )
        self.assertTrue(p.has_next())
        self.assertFalse(p.has_previous())
        self.assertTrue(p.has_other_pages())
        self.assertEqual(2, p.next_page_number())
        self.assertRaises(InvalidPage, p.previous_page_number)
        self.assertEqual(1, p.start_index())
        self.assertEqual(5, p.end_index())

    def test_last_page(self):
        """The last (partial) page exposes the expected navigation state."""
        paginator = Paginator(Article.objects.all(), 5)
        p = paginator.page(2)
        self.assertEqual("<Page 2 of 2>", six.text_type(p))
        self.assertQuerysetEqual(p.object_list, [
            "<Article: Article 6>",
            "<Article: Article 7>",
            "<Article: Article 8>",
            "<Article: Article 9>"
            ],
            ordered=False
        )
        self.assertFalse(p.has_next())
        self.assertTrue(p.has_previous())
        self.assertTrue(p.has_other_pages())
        self.assertRaises(InvalidPage, p.next_page_number)
        self.assertEqual(1, p.previous_page_number())
        self.assertEqual(6, p.start_index())
        self.assertEqual(9, p.end_index())

    def test_page_getitem(self):
        """
        Tests proper behavior of a paginator page __getitem__ (queryset
        evaluation, slicing, exception raised).
        """
        paginator = Paginator(Article.objects.all(), 5)
        p = paginator.page(1)

        # Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
        # (this happens from the template engine when using eg: {% page_obj.has_previous %})
        self.assertIsNone(p.object_list._result_cache)
        self.assertRaises(TypeError, lambda: p['has_previous'])
        self.assertIsNone(p.object_list._result_cache)
        self.assertNotIsInstance(p.object_list, list)

        # Make sure slicing the Page object with numbers and slice objects work.
        self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
        self.assertQuerysetEqual(p[slice(2)], [
            "<Article: Article 1>",
            "<Article: Article 2>",
            ]
        )
        # After __getitem__ is called, object_list is a list
        self.assertIsInstance(p.object_list, list)
| bsd-3-clause |
mellowizz/ocny_tax_info | qgis_show_ocny_tax_info.py | 1 | 1143 | from qgis.core import *
from qgis.gui import *
import mechanize
import cookielib
@qgsfunction(args='auto', group='Custom')
def show_tax_info(pin, feature, parent):
    """QGIS expression function: fetch the Orange County, NY property
    detail page for *pin* and return the raw response body.

    *feature* and *parent* are required by the qgsfunction calling
    convention but are not used here.
    """
    br = mechanize.Browser()
    # Cookie Jar
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    # Browser behaviour: follow redirects/refreshes, send Referer, and
    # ignore robots.txt so the portal pages can be fetched.
    br.set_handle_equiv(True)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    url = 'http://propertydata.orangecountygov.com/imate/propdetail.aspx'
    # first 4 of PIN are town code: str(pin)[0:4]
    # search = '/'.join([BASE_URL, 'viewlist.aspx?sort=printkey&swis={tcode}'])
    # get cookie
    # Visit the county portal first so a session cookie is established
    # before requesting the property-detail page.
    br.open('http://www.co.orange.ny.us/content/124/1368/4136.aspx')
    for link in br.links():
        if 'index.aspx' in link.url:
            br.follow_link(link)
            break
    # NOTE(review): the comment above says the town code is the first 4
    # characters of the PIN, but the code slices 6 characters for the
    # SWIS code -- confirm which is intended.
    swis = str(pin)[:6]
    printkey = str(pin)[6:]
    search_terms = 'swis={}&printkey={}'.format(swis, printkey)
    full_url = '?'.join([url, search_terms])
    response = br.open(full_url)
    return response.read()
| gpl-3.0 |
zzzeek/alembic | alembic/util/compat.py | 2 | 3714 | import collections
import inspect
import io
import os
is_posix = os.name == "posix"
# Result type mirroring the (removed) inspect.ArgSpec named tuple.
ArgSpec = collections.namedtuple("ArgSpec", "args varargs keywords defaults")


def inspect_getargspec(func):
    """Reimplementation of ``getargspec`` based on the fully vendored
    ``getfullargspec`` logic from Python 3.3."""
    if inspect.ismethod(func):
        func = func.__func__
    if not inspect.isfunction(func):
        raise TypeError("{!r} is not a Python function".format(func))

    code = func.__code__
    if not inspect.iscode(code):
        raise TypeError("{!r} is not a code object".format(code))

    # Positional parameters come first in co_varnames, followed by the
    # keyword-only names, then *args / **kwargs if present.
    positional = list(code.co_varnames[:code.co_argcount])
    cursor = code.co_argcount + code.co_kwonlyargcount

    varargs = None
    if code.co_flags & inspect.CO_VARARGS:
        varargs = code.co_varnames[cursor]
        cursor += 1

    varkw = None
    if code.co_flags & inspect.CO_VARKEYWORDS:
        varkw = code.co_varnames[cursor]

    return ArgSpec(positional, varargs, varkw, func.__defaults__)
# String/bytes/text type aliases, bound to the Python 3 types.
string_types = (str,)
binary_type = bytes
text_type = str
def _formatannotation(annotation, base_module=None):
"""vendored from python 3.7"""
if getattr(annotation, "__module__", None) == "typing":
return repr(annotation).replace("typing.", "")
if isinstance(annotation, type):
if annotation.__module__ in ("builtins", base_module):
return annotation.__qualname__
return annotation.__module__ + "." + annotation.__qualname__
return repr(annotation)
def inspect_formatargspec(
args,
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=(),
kwonlydefaults={},
annotations={},
formatarg=str,
formatvarargs=lambda name: "*" + name,
formatvarkw=lambda name: "**" + name,
formatvalue=lambda value: "=" + repr(value),
formatreturns=lambda text: " -> " + text,
formatannotation=_formatannotation,
):
"""Copy formatargspec from python 3.7 standard library.
Python 3 has deprecated formatargspec and requested that Signature
be used instead, however this requires a full reimplementation
of formatargspec() in terms of creating Parameter objects and such.
Instead of introducing all the object-creation overhead and having
to reinvent from scratch, just copy their compatibility routine.
"""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ": " + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append("*")
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = "(" + ", ".join(specs) + ")"
if "return" in annotations:
result += formatreturns(formatannotation(annotations["return"]))
return result
# produce a wrapper that allows encoded text to stream
# into a given buffer, but doesn't close it.
# not sure of a more idiomatic approach to this.
class EncodedIO(io.TextIOWrapper):
    # Overridden as a no-op so that closing (or garbage-collecting) the
    # wrapper leaves the underlying binary buffer open for further use.
    def close(self):
        pass
| mit |
mspark93/VTK | ThirdParty/Twisted/twisted/web/test/test_stan.py | 41 | 4762 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web._stan} portion of the L{twisted.web.template}
implementation.
"""
from twisted.web.template import Comment, CDATA, CharRef, Tag
from twisted.trial.unittest import TestCase
def proto(*a, **kw):
    """
    Produce a new tag for testing.
    """
    tag = Tag('hello')
    return tag(*a, **kw)
class TestTag(TestCase):
"""
Tests for L{Tag}.
"""
def test_fillSlots(self):
"""
L{Tag.fillSlots} returns self.
"""
tag = proto()
self.assertIdentical(tag, tag.fillSlots(test='test'))
    def test_cloneShallow(self):
        """
        L{Tag.clone} copies all attributes and children of a tag, including its
        render attribute. If the shallow flag is C{False}, that's where it
        stops.
        """
        innerList = ["inner list"]
        tag = proto("How are you", innerList,
                    hello="world", render="aSampleMethod")
        tag.fillSlots(foo='bar')
        tag.filename = "foo/bar"
        tag.lineNumber = 6
        tag.columnNumber = 12
        clone = tag.clone(deep=False)
        self.assertEqual(clone.attributes['hello'], 'world')
        self.assertNotIdentical(clone.attributes, tag.attributes)
        self.assertEqual(clone.children, ["How are you", innerList])
        self.assertNotIdentical(clone.children, tag.children)
        # Shallow clone: the child list object itself is shared, not copied.
        self.assertIdentical(clone.children[1], innerList)
        self.assertEqual(tag.slotData, clone.slotData)
        self.assertNotIdentical(tag.slotData, clone.slotData)
        self.assertEqual(clone.filename, "foo/bar")
        self.assertEqual(clone.lineNumber, 6)
        self.assertEqual(clone.columnNumber, 12)
        self.assertEqual(clone.render, "aSampleMethod")
def test_cloneDeep(self):
"""
L{Tag.clone} copies all attributes and children of a tag, including its
render attribute. In its normal operating mode (where the deep flag is
C{True}, as is the default), it will clone all sub-lists and sub-tags.
"""
innerTag = proto("inner")
innerList = ["inner list"]
tag = proto("How are you", innerTag, innerList,
hello="world", render="aSampleMethod")
tag.fillSlots(foo='bar')
tag.filename = "foo/bar"
tag.lineNumber = 6
tag.columnNumber = 12
clone = tag.clone()
self.assertEqual(clone.attributes['hello'], 'world')
self.assertNotIdentical(clone.attributes, tag.attributes)
self.assertNotIdentical(clone.children, tag.children)
# sanity check
self.assertIdentical(tag.children[1], innerTag)
# clone should have sub-clone
self.assertNotIdentical(clone.children[1], innerTag)
# sanity check
self.assertIdentical(tag.children[2], innerList)
# clone should have sub-clone
self.assertNotIdentical(clone.children[2], innerList)
self.assertEqual(tag.slotData, clone.slotData)
self.assertNotIdentical(tag.slotData, clone.slotData)
self.assertEqual(clone.filename, "foo/bar")
self.assertEqual(clone.lineNumber, 6)
self.assertEqual(clone.columnNumber, 12)
self.assertEqual(clone.render, "aSampleMethod")
def test_clear(self):
"""
L{Tag.clear} removes all children from a tag, but leaves its attributes
in place.
"""
tag = proto("these are", "children", "cool", andSoIs='this-attribute')
tag.clear()
self.assertEqual(tag.children, [])
self.assertEqual(tag.attributes, {'andSoIs': 'this-attribute'})
def test_suffix(self):
"""
L{Tag.__call__} accepts Python keywords with a suffixed underscore as
the DOM attribute of that literal suffix.
"""
proto = Tag('div')
tag = proto()
tag(class_='a')
self.assertEqual(tag.attributes, {'class': 'a'})
def test_commentRepr(self):
"""
L{Comment.__repr__} returns a value which makes it easy to see what's in
the comment.
"""
self.assertEqual(repr(Comment(u"hello there")),
"Comment(u'hello there')")
def test_cdataRepr(self):
"""
L{CDATA.__repr__} returns a value which makes it easy to see what's in
the comment.
"""
self.assertEqual(repr(CDATA(u"test data")),
"CDATA(u'test data')")
def test_charrefRepr(self):
"""
L{CharRef.__repr__} returns a value which makes it easy to see what
character is referred to.
"""
snowman = ord(u"\N{SNOWMAN}")
self.assertEqual(repr(CharRef(snowman)), "CharRef(9731)")
| bsd-3-clause |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/django/core/management/commands/sendtestemail.py | 126 | 1518 | import socket
from django.core.mail import mail_admins, mail_managers, send_mail
from django.core.management.base import BaseCommand
from django.utils import timezone
class Command(BaseCommand):
    """Management command that sends a test email to verify outgoing mail
    configuration.

    Recipients may be given explicitly as positional arguments and/or via
    the ``--managers`` / ``--admins`` flags, which use the address lists
    from ``settings.MANAGERS`` and ``settings.ADMINS`` respectively.
    """
    help = "Sends a test email to the email addresses specified as arguments."
    # Fix: the flag defined below is '--admins', not '--admin'.
    missing_args_message = "You must specify some email recipients, or pass the --managers or --admins options."

    def add_arguments(self, parser):
        parser.add_argument(
            'email', nargs='*',
            help='One or more email addresses to send a test email to.',
        )
        parser.add_argument(
            '--managers', action='store_true', dest='managers', default=False,
            help='Send a test email to the addresses specified in settings.MANAGERS.',
        )
        parser.add_argument(
            '--admins', action='store_true', dest='admins', default=False,
            help='Send a test email to the addresses specified in settings.ADMINS.',
        )

    def handle(self, *args, **kwargs):
        # Include host and timestamp so repeated test emails are distinguishable.
        subject = 'Test email from %s on %s' % (socket.gethostname(), timezone.now())

        send_mail(
            subject=subject,
            message="If you\'re reading this, it was successful.",
            from_email=None,  # falls back to settings.DEFAULT_FROM_EMAIL
            recipient_list=kwargs['email'],
        )

        if kwargs['managers']:
            mail_managers(subject, "This email was sent to the site managers.")

        if kwargs['admins']:
            mail_admins(subject, "This email was sent to the site admins.")
| apache-2.0 |
eduNEXT/edx-sga | edx_sga/tests/test_sga.py | 2 | 31225 | """
Tests for SGA
"""
# pylint: disable=imported-auth-user
import builtins
import datetime
import json
import mimetypes
import os
import uuid
from unittest import mock
import pytest
import pytz
from ddt import data, ddt, unpack
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.timezone import now as django_now
from opaque_keys.edx.locations import Location
from opaque_keys.edx.locator import CourseLocator
from workbench.runtime import WorkbenchRuntime
from xblock.field_data import DictFieldData
from xblock.fields import DateTime
from edx_sga.tests.common import DummyResource, TempfileMixin
SHA1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
UUID = '8c4b765745f746f7a128470842211601'
pytestmark = pytest.mark.django_db
def fake_get_submission(**kwargs):
    """Build a fake submission dict shaped like the submissions API output."""
    answer = {
        "sha1": SHA1,
        "filename": kwargs.get("filename", "file.txt"),
        "mimetype": kwargs.get("mimetype", "mime/type"),
    }
    finalized = kwargs.get("finalized")
    if finalized:
        answer["finalized"] = finalized
    return {
        "answer": answer,
        "uuid": UUID,
        "submitted_at": kwargs.get("submitted_at"),
    }
def fake_upload_submission(upload):
    """Build fake submission data whose filename/mimetype come from *upload*."""
    name = upload.file.name
    guessed_mimetype = mimetypes.guess_type(name)[0]
    return fake_get_submission(filename=name, mimetype=guessed_mimetype)
def fake_student_module():
    """Return a mock standing in for a courseware StudentModule record."""
    fake_student = mock.Mock(username="fred6", is_staff=False, password="test")
    return mock.Mock(
        course_id=CourseLocator(org='foo', course='baz', run='bar'),
        module_state_key="foo",
        student=fake_student,
        state='{"display_name": "Staff Graded Assignment"}',
        save=mock.Mock(),
    )
class FakeWorkbenchRuntime(WorkbenchRuntime):
    """Override for testing purposes"""

    # Fixed anonymous student id; a matching Django User is created in
    # __init__ so get_real_user() can resolve it.
    anonymous_student_id = 'MOCK'
    user_is_staff = True

    def __init__(self, *args, **kwargs):
        """Initialize the workbench runtime and create the backing user row."""
        super().__init__(*args, **kwargs)
        User.objects.create(username=self.anonymous_student_id)

    def get_real_user(self, username):
        """Get the real user"""
        return User.objects.get(username=username)
@ddt
class StaffGradedAssignmentMockedTests(TempfileMixin):
    """
    Create a SGA block with mock data.

    NOTE: @mock.patch decorators inject mocks bottom-up, so the decorator
    closest to the method corresponds to the first mock parameter.
    """
    @classmethod
    def setUpClass(cls):
        # Shared temp directory backs the fake default storage for all tests.
        super().setUpClass()
        cls.set_up_temp_directory()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        cls.tear_down_temp_directory()

    def setUp(self):
        """
        Creates a test course ID, mocks the runtime, and creates a fake storage
        engine for use in all tests
        """
        super().setUp()
        # fakes imports: edx-platform packages are not installed in the test
        # environment, so unresolvable imports are replaced with mocks.
        real_import = builtins.__import__

        def fake_import(name, *args, **kwargs):
            """mock imported object if not it is not available"""
            try:
                return real_import(name, *args, **kwargs)
            except ImportError:
                for module in ('common', 'courseware', 'lms', 'xmodule'):
                    if name.startswith(f"{module}.") or name == module:
                        return mock.Mock()
                if name == 'safe_lxml':
                    # safe_lxml is an edx wrapper; plain lxml is equivalent here.
                    return real_import('lxml', *args, **kwargs)
                raise

        builtins.__import__ = fake_import

        def restore_import():
            """restore builtin importer"""
            builtins.__import__ = real_import

        self.addCleanup(restore_import)
        self.course_id = CourseLocator(org='foo', course='baz', run='bar')
        self.runtime = FakeWorkbenchRuntime()
        self.scope_ids = mock.Mock()
        self.staff = mock.Mock(return_value={
            "password": "test",
            "username": "tester",
            "is_staff": True
        })

    def make_xblock(self, display_name=None, **kwargs):
        """
        Creates a XBlock SGA for testing purpose.
        """
        # Imported lazily so the fake importer installed in setUp is active.
        from edx_sga.sga import StaffGradedAssignmentXBlock as cls  # pylint: disable=import-outside-toplevel
        field_data = DictFieldData(kwargs)
        block = cls(self.runtime, field_data, self.scope_ids)
        block.location = Location(
            'foo', 'bar', 'baz', 'category', 'name', 'revision'
        )
        block.xmodule_runtime = self.runtime
        block.course_id = self.course_id
        block.scope_ids.usage_id = "i4x://foo/bar/category/name"
        block.category = 'problem'
        if display_name:
            block.display_name = display_name
        block.start = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=pytz.utc)
        return block

    def test_ctor(self):
        """
        Test points are set correctly.
        """
        block = self.make_xblock(points=10)
        assert block.display_name == "Staff Graded Assignment"
        assert block.points == 10

    def test_max_score(self):
        """
        Text max score is set correctly.
        """
        block = self.make_xblock(points=20)
        assert block.max_score() == 20

    def test_max_score_integer(self):
        """
        Test assigning a float max score is rounded to nearest integer.
        """
        block = self.make_xblock(points=20.4)
        assert block.max_score() == 20

    def personalize_upload(self, block, upload):
        """
        Set values on block from file upload.
        """
        # Timestamp uses the configured TIME_ZONE, falling back to UTC.
        now = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(getattr(settings, "TIME_ZONE", pytz.utc.zone)))
        block.annotated_mimetype = mimetypes.guess_type(upload.file.name)[0]
        block.annotated_filename = upload.file.name.encode('utf-8')
        block.annotated_sha1 = SHA1
        block.annotated_timestamp = now.strftime(
            DateTime.DATETIME_FORMAT
        )

    @mock.patch('edx_sga.sga._resource', DummyResource)
    @mock.patch('edx_sga.sga.render_template')
    @mock.patch('edx_sga.sga.Fragment')
    def test_student_view(self, fragment, render_template):
        """
        Test student view renders correctly.
        """
        block = self.make_xblock("Custom name")
        with mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission',
            return_value={}
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.student_state',
            return_value={
                'uploaded': None,
                'annotated': None,
                'upload_allowed': True,
                'max_score': 100,
                'graded': None
            }
        ):
            fragment = block.student_view()
            assert render_template.called is True
            template_arg = render_template.call_args[0][0]
            assert template_arg == 'templates/staff_graded_assignment/show.html'
            context = render_template.call_args[0][1]
            assert context['is_course_staff'] is True
            assert context['id'] == 'name'
            student_state = json.loads(context['student_state'])
            assert student_state['uploaded'] is None
            assert student_state['annotated'] is None
            assert student_state['upload_allowed'] is True
            assert student_state['max_score'] == 100
            assert student_state['graded'] is None
            # pylint: disable=no-member
            fragment.add_css.assert_called_once_with(
                DummyResource("static/css/edx_sga.css"))
            fragment.initialize_js.assert_called_once_with(
                "StaffGradedAssignmentXBlock")

    @mock.patch('edx_sga.sga._resource', DummyResource)
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.upload_allowed')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_score')
    @mock.patch('edx_sga.sga.render_template')
    @mock.patch('edx_sga.sga.Fragment')
    def test_student_view_with_score(self, fragment, render_template, get_score, upload_allowed):
        """
        Tests scores are displayed correctly on student view.
        """
        block = self.make_xblock()
        get_score.return_value = 10
        upload_allowed.return_value = True
        block.comment = "ok"

        # First upload an assignment, then render the student view with a score.
        with self.dummy_upload('foo.txt') as (upload, _):
            with mock.patch(
                'submissions.api.create_submission',
            ) as mocked_create_submission, mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.student_state', return_value={}
            ), mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_or_create_student_module',
                return_value=fake_student_module()
            ):
                block.upload_assignment(mock.Mock(params={'assignment': upload}))
            assert mocked_create_submission.called is True

            with mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission',
                return_value=fake_upload_submission(upload)
            ), mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.student_state',
                return_value={
                    'graded': {'comment': 'ok', 'score': 10},
                    'uploaded': {'filename': 'foo.txt'},
                    'max_score': 100
                }
            ):
                fragment = block.student_view()
                assert render_template.called is True
                template_arg = render_template.call_args[0][0]
                assert template_arg == 'templates/staff_graded_assignment/show.html'
                context = render_template.call_args[0][1]
                assert context['is_course_staff'] is True
                assert context['id'] == 'name'
                student_state = json.loads(context['student_state'])
                assert student_state['uploaded'] == {'filename': 'foo.txt'}
                assert student_state['graded'] == {'comment': 'ok', 'score': 10}
                assert student_state['max_score'] == 100
                # pylint: disable=no-member
                fragment.add_css.assert_called_once_with(
                    DummyResource("static/css/edx_sga.css"))
                fragment.initialize_js.assert_called_once_with(
                    "StaffGradedAssignmentXBlock")

    def test_studio_view(self):
        """
        Test studio view is using the StudioEditableXBlockMixin function
        """
        with mock.patch('edx_sga.sga.StudioEditableXBlockMixin.studio_view') as studio_view_mock:
            block = self.make_xblock()
            block.studio_view()
        studio_view_mock.assert_called_once_with(None)

    def test_save_sga(self):
        """
        Tests save SGA block on studio
        """
        def weights_positive_float_test():
            """
            tests weight is non negative float.
            """
            orig_weight = 11.0

            # Test negative weight doesn't work
            block.save_sga(mock.Mock(method="POST", body=json.dumps({
                "display_name": "Test Block",
                "points": '100',
                "weight": -10.0}).encode('utf-8')))
            assert block.weight == orig_weight

            # Test string weight doesn't work
            block.save_sga(mock.Mock(method="POST", body=json.dumps({
                "display_name": "Test Block",
                "points": '100',
                "weight": "a"}).encode('utf-8')))
            assert block.weight == orig_weight

        def point_positive_int_test():
            """
            Tests point is positive number.
            """
            # Test negative doesn't work
            block.save_sga(mock.Mock(method="POST", body=json.dumps({
                "display_name": "Test Block",
                "points": '-10',
                "weight": 11}).encode('utf-8')))
            assert block.points == orig_score

            # Test float doesn't work
            block.save_sga(mock.Mock(method="POST", body=json.dumps({
                "display_name": "Test Block",
                "points": '24.5',
                "weight": 11}).encode('utf-8')))
            assert block.points == orig_score

        orig_score = 23
        block = self.make_xblock()
        # Empty body leaves the defaults in place.
        block.save_sga(mock.Mock(body='{}'))
        assert block.display_name == "Staff Graded Assignment"
        assert block.points == 100
        assert block.weight is None
        block.save_sga(mock.Mock(method="POST", body=json.dumps({
            "display_name": "Test Block",
            "points": orig_score,
            "weight": 11}).encode('utf-8')))
        assert block.display_name == "Test Block"
        assert block.points == orig_score
        assert block.weight == 11

        point_positive_int_test()
        weights_positive_float_test()

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_student_item_dict')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.upload_allowed')
    @mock.patch('edx_sga.sga.get_sha1')
    def test_upload_download_assignment(self, get_sha1, upload_allowed, get_student_item_dict):
        # pylint: disable=unused-argument
        """
        Tests upload and download assignment for non staff.
        """
        file_name = 'test.txt'
        block = self.make_xblock()
        get_student_item_dict.return_value = {
            "student_id": 1,
            "course_id": block.block_course_id,
            "item_id": block.block_id,
            "item_type": 'sga',
        }
        upload_allowed.return_value = True

        with self.dummy_upload(file_name) as (upload, expected):
            with mock.patch('submissions.api.create_submission') as mocked_create_submission, mock.patch(
                "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
                return_value=block.file_storage_path(SHA1, file_name)
            ), mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.student_state', return_value={}
            ), mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_or_create_student_module',
                return_value=fake_student_module()
            ) as mocked_create_student_module:
                block.upload_assignment(mock.Mock(params={'assignment': upload}))
            assert mocked_create_submission.called is True
            assert mocked_create_student_module.called is True

            # Download succeeds when the stored file path resolves.
            with mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission',
                return_value=fake_upload_submission(upload)
            ), mock.patch(
                "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
                return_value=block.file_storage_path(SHA1, file_name)
            ):
                response = block.download_assignment(None)
                assert response.body == expected

            # Missing file yields a 404.
            with mock.patch(
                "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
                return_value=block.file_storage_path("", "test_notfound.txt")
            ), mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission',
                return_value=fake_upload_submission(upload)
            ):
                response = block.download_assignment(None)
                assert response.status_code == 404

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_student_item_dict')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.upload_allowed')
    @data(({'finalized': False}, True), ({}, True), ({'finalized': True}, False))
    @unpack
    def test_finalize_uploaded_assignment(
            self, finalized_setting, model_change_expected, upload_allowed, get_student_item_dict
    ):
        """
        Tests that finalize_uploaded_assignment sets a submission to be finalized
        """
        block = self.make_xblock()
        get_student_item_dict.return_value = {
            "student_id": 1,
            "course_id": block.block_course_id,
            "item_id": block.block_id,
            "item_type": 'sga',
        }
        upload_allowed.return_value = True
        existing_submitted_at_value = django_now()
        fake_submission_data = fake_get_submission(**finalized_setting)
        fake_submission_object = mock.Mock(
            submitted_at=existing_submitted_at_value,
            answer=fake_submission_data['answer']
        )

        with mock.patch(
            'edx_sga.sga.Submission.objects.get', return_value=fake_submission_object
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission', return_value=fake_submission_data
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.student_state', return_value={}
        ):
            block.finalize_uploaded_assignment(mock.Mock())

        assert fake_submission_object.answer['finalized'] is True
        # The record is only touched when it was not already finalized.
        assert (existing_submitted_at_value != fake_submission_object.submitted_at) is model_change_expected
        assert fake_submission_object.save.called is model_change_expected

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_student_module')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.is_course_staff')
    @mock.patch('edx_sga.sga.get_sha1')
    def test_staff_upload_download_annotated(self, get_sha1, is_course_staff, get_student_module):
        """
        Tests upload and download of annotated staff files.
        """
        get_student_module.return_value = fake_student_module()
        is_course_staff.return_value = True
        get_sha1.return_value = SHA1
        file_name = 'test.txt'
        block = self.make_xblock()

        with self.dummy_upload(file_name) as (upload, expected), mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.staff_grading_data",
            return_value={}
        ) as staff_grading_data:
            block.staff_upload_annotated(mock.Mock(params={'annotated': upload, 'module_id': 1}))
        assert staff_grading_data.called is True

        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
            return_value=block.file_storage_path(SHA1, file_name)
        ):
            response = block.staff_download_annotated(mock.Mock(params={'module_id': 1}))
            assert response.body == expected

        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
            return_value=block.file_storage_path("", "test_notfound.txt")
        ):
            response = block.staff_download_annotated(
                mock.Mock(params={'module_id': 1})
            )
            assert response.status_code == 404

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_student_module')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.is_course_staff')
    @mock.patch('edx_sga.sga.get_sha1')
    def test_download_annotated(self, get_sha1, is_course_staff, get_student_module):
        """
        Test download annotated assignment for non staff.
        """
        get_student_module.return_value = fake_student_module()
        is_course_staff.return_value = True
        get_sha1.return_value = SHA1
        file_name = 'test.txt'
        block = self.make_xblock()

        with self.dummy_upload(file_name) as (upload, expected):
            with mock.patch(
                "edx_sga.sga.StaffGradedAssignmentXBlock.staff_grading_data",
                return_value={}
            ) as staff_grading_data:
                block.staff_upload_annotated(mock.Mock(params={
                    'annotated': upload,
                    'module_id': 1
                }))
            assert staff_grading_data.called is True
            self.personalize_upload(block, upload)

            with mock.patch(
                "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
                return_value=block.file_storage_path(SHA1, file_name)
            ):
                response = block.download_annotated(None)
                assert response.body == expected

        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
            return_value=block.file_storage_path("", "test_notfound.txt")
        ):
            response = block.download_annotated(None)
            assert response.status_code == 404

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.upload_allowed')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_student_module')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.is_course_staff')
    @mock.patch('edx_sga.sga.get_sha1')
    def test_staff_download(self, get_sha1, is_course_staff, get_student_module, upload_allowed):
        """
        Test download for staff.
        """
        get_student_module.return_value = fake_student_module()
        is_course_staff.return_value = True
        upload_allowed.return_value = True
        get_sha1.return_value = SHA1
        block = self.make_xblock()

        with self.dummy_upload('test.txt') as (upload, expected), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.student_state', return_value={}
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_or_create_student_module',
            return_value=fake_student_module()
        ), mock.patch(
            'submissions.api.create_submission'
        ) as mocked_create_submission:
            block.upload_assignment(mock.Mock(params={'assignment': upload}))
            assert mocked_create_submission.called is True
            self.personalize_upload(block, upload)

            with mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission',
                return_value=fake_upload_submission(upload)
            ):
                response = block.staff_download(mock.Mock(params={
                    'student_id': 1}))
                assert response.body == expected

            with mock.patch(
                "edx_sga.sga.StaffGradedAssignmentXBlock.file_storage_path",
                return_value=block.file_storage_path("", "test_notfound.txt")
            ), mock.patch(
                'edx_sga.sga.StaffGradedAssignmentXBlock.get_submission',
                return_value=fake_upload_submission(upload)
            ):
                response = block.staff_download(
                    mock.Mock(params={'student_id': 1})
                )
                assert response.status_code == 404

    @unpack
    @data(
        {'past_due': False, 'score': None, 'is_finalized_submission': False, 'expected_value': True},
        {'past_due': True, 'score': None, 'is_finalized_submission': False, 'expected_value': False},
        {'past_due': False, 'score': 80, 'is_finalized_submission': False, 'expected_value': False},
        {'past_due': False, 'score': None, 'is_finalized_submission': True, 'expected_value': False},
    )
    def test_upload_allowed(self, past_due, score, is_finalized_submission, expected_value):
        """
        Tests that upload_allowed returns the right value under certain conditions
        """
        block = self.make_xblock()
        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.past_due",
            return_value=past_due
        ), mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.get_score",
            return_value=score
        ), mock.patch(
            "edx_sga.sga.is_finalized_submission",
            return_value=is_finalized_submission
        ):
            assert block.upload_allowed(submission_data={}) is expected_value

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.count_archive_files')
    @mock.patch('edx_sga.sga.zip_student_submissions')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_sorted_submissions')
    @data((False, False), (True, True))
    @unpack
    def test_prepare_download_submissions(
            self,
            is_zip_file_available,
            downloadable,
            get_sorted_submissions,
            zip_student_submissions,
            count_archive_files
    ):
        """
        Test prepare download api
        """
        block = self.make_xblock()
        count_archive_files.return_value = 2
        get_sorted_submissions.return_value = [
            {
                'submission_id': uuid.uuid4().hex,
                'filename': f"test_{uuid.uuid4().hex}.txt",
                'timestamp': datetime.datetime.now(tz=pytz.utc)
            } for __ in range(2)
        ]
        zip_student_submissions.delay = mock.Mock()
        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.is_zip_file_available",
            return_value=is_zip_file_available
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_real_user',
            return_value=self.staff
        ), mock.patch(
            # NOTE(review): patches edx_sga.utils here but edx_sga.sga in the
            # task-called test below — confirm both targets are intentional.
            'edx_sga.utils.default_storage.get_modified_time',
            return_value=datetime.datetime.now()
        ):
            response = block.prepare_download_submissions(None)
            response_body = json.loads(response.body.decode('utf-8'))
            assert response_body["downloadable"] is downloadable

    @mock.patch('edx_sga.sga.get_file_modified_time_utc')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.count_archive_files')
    @mock.patch('edx_sga.sga.zip_student_submissions')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_sorted_submissions')
    @data((2, True, False), (1, False, True))
    @unpack
    def test_prepare_download_submissions_when_student_score_reset(
            self,
            count_archive_files,
            downloadable,
            zip_task_called,
            get_sorted_submissions,
            zip_student_submissions,
            count_archive_files_mock,
            get_file_modified_time_utc
    ):
        """
        Test prepare download api
        """
        now = datetime.datetime.now(tz=pytz.utc)
        block = self.make_xblock()
        count_archive_files_mock.return_value = count_archive_files
        get_sorted_submissions.return_value = [
            {
                'submission_id': uuid.uuid4().hex,
                'filename': f"test_{uuid.uuid4().hex}.txt",
                'timestamp': now
            } for __ in range(2)
        ]
        get_file_modified_time_utc.return_value = now
        zip_student_submissions.delay = mock.Mock()
        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.is_zip_file_available", return_value=True
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_real_user', return_value=self.staff
        ), mock.patch(
            'edx_sga.utils.default_storage.get_modified_time', return_value=datetime.datetime.now()
        ):
            response = block.prepare_download_submissions(None)
            response_body = json.loads(response.body.decode('utf-8'))
            assert response_body["downloadable"] is downloadable
        # When the archive is stale (fewer files than submissions) the zip
        # task must be re-queued.
        assert zip_student_submissions.delay.called is zip_task_called

    @mock.patch('edx_sga.sga.zip_student_submissions')
    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.get_sorted_submissions')
    def test_prepare_download_submissions_task_called(
            self,
            get_sorted_submissions,
            zip_student_submissions
    ):
        """
        Test prepare download api
        """
        block = self.make_xblock()
        get_sorted_submissions.return_value = [
            {
                'submission_id': uuid.uuid4().hex,
                'filename': f"test_{uuid.uuid4().hex}.txt",
                'timestamp': datetime.datetime.utcnow()
            } for __ in range(2)
        ]
        zip_student_submissions.delay = mock.Mock()
        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.is_zip_file_available",
            return_value=False
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_real_user',
            return_value=self.staff
        ), mock.patch(
            'edx_sga.sga.default_storage.get_modified_time',
            return_value=datetime.datetime.now()
        ):
            response = block.prepare_download_submissions(None)
            response_body = json.loads(response.body.decode('utf-8'))
            assert response_body["downloadable"] is False

        zip_student_submissions.delay.assert_called_once_with(
            str(block.block_course_id),
            str(block.block_id),
            str(block.location),
            self.staff.username
        )

    @data((False, False), (True, True))
    @unpack
    def test_download_submissions_status(self, is_zip_file_available, downloadable):
        """test download_submissions_status api"""
        block = self.make_xblock()
        with mock.patch(
            "edx_sga.sga.StaffGradedAssignmentXBlock.is_zip_file_available",
            return_value=is_zip_file_available
        ):
            response = block.download_submissions_status(None)
            response_body = json.loads(response.body.decode('utf-8'))
            assert response_body["zip_available"] is downloadable

    @mock.patch('edx_sga.sga.StaffGradedAssignmentXBlock.is_course_staff')
    def test_download_submissions(self, is_course_staff):
        """tests download_submissions"""
        block = self.make_xblock()
        is_course_staff.return_value = True

        expected = b"some information blah"
        filename = "foo.zip"
        path = os.path.join(self.temp_directory, filename)
        with open(path, "wb") as temp_file:
            temp_file.write(expected)

        with mock.patch(
            "edx_sga.sga.get_zip_file_path", return_value=path
        ), mock.patch(
            'edx_sga.sga.StaffGradedAssignmentXBlock.get_real_user',
            return_value=self.staff
        ), mock.patch(
            "edx_sga.sga.get_zip_file_name", return_value=filename
        ):
            response = block.download_submissions(None)
            assert response.status_code == 200
            assert response.body == expected

    def test_clear_student_state(self):
        """Tests that a student's state in the given problem is properly cleared"""
        block = self.make_xblock()
        orig_file_name = 'test.txt'
        fake_submission = fake_get_submission(filename=orig_file_name)
        uploaded_file_path = block.file_storage_path(SHA1, orig_file_name)

        with self.dummy_file_in_storage(uploaded_file_path) as file_path:
            with mock.patch(
                "edx_sga.sga.submissions_api.get_submissions",
                return_value=[fake_submission]
            ) as mocked_get_submissions, mock.patch(
                "edx_sga.sga.submissions_api.reset_score"
            ) as mocked_reset_score:
                assert self.default_storage.exists(file_path) is True
                block.clear_student_state(user_id=123)
                assert mocked_get_submissions.called is True
                # Clearing the student state should call 'reset_score' in the submission API,
                # which effectively resets the Submission record.
                assert mocked_reset_score.called is True
                # Clearing the student state should also delete the uploaded file
                assert self.default_storage.exists(file_path) is False
| agpl-3.0 |
AlexWoo/pyed | pysys/pycmdserver.py | 1 | 1693 | from pyevent.event import event
from pyevent.tcpserver import tcpserver
from pyevent.tcpconnection import tcpconnection
class cmdserver(tcpserver):
    """TCP command server.

    Accepts at most one command client at a time, forwards each received
    command line to the worker process, and relays the worker's response
    back to the client.  Registers itself with the process manager via
    ``proc.setcmdserver(self)``.
    """
    def __init__(self, pesys):
        self.evs = pesys.evs
        self.tms = pesys.tms
        self.log = pesys.log
        self.proc = pesys.proc
        self.proc.setcmdserver(self)
        self.srvconf = pesys.conf.cmdserver
        # Active command connection; None when no command is in flight.
        self.c = None
        tcpserver.__init__(self, self.accepthandler, self.srvconf,
            self.evs, self.tms)

    def accepthandler(self, ev):
        """Accept a new command client; reject it if one is already being served."""
        csock, _ = ev.sock.accept()
        if self.c:
            # Single-client policy: refuse concurrent command clients.
            csock.close()
            self.log.logInfo("CmdServer", "Cmdserver has cmd to process, close new cmdclient")
            return
        self.c = tcpconnection(csock, self.srvconf, self.evs, self.tms)
        self.c.set_recvmsg(self.recvmsg)
        self.c.set_broken(self.brokenhandler)

    def recvmsg(self, c):
        """Forward a received command to the worker and arm a response timeout."""
        buf = self.c.read()
        self.log.logInfo("CmdServer", "Send cmd[%s] to worker", buf.strip())
        self.proc.sendcmd(buf)
        self.ev = event(self.evs, self.tms)
        # NOTE(review): 5000 presumably means milliseconds for a 5s timeout —
        # confirm against the pyevent timer API.
        self.ev.add_timer(5000, self.timeouthandler) # set cmd response timeout to 5s

    def sendresp(self, buf, islast):
        """Relay a worker response chunk to the client; close after the last chunk."""
        self.c.write(buf)
        if islast:
            self.c.close()
            self.c = None
            self.ev.del_timer()

    def brokenhandler(self, c):
        """Client connection dropped: clear state and cancel the timeout."""
        self.c = None
        self.ev.del_timer()
        self.log.logInfo("CmdServer", "Cmdclient link broken")

    def timeouthandler(self, ev):
        """Worker did not answer in time: drop the client and cancel the timer."""
        self.log.logInfo("CmdServer", "Wait for Worker response timeout")
        self.c.close()
        self.c = None
        self.ev.del_timer()
| bsd-2-clause |
odyaka341/pyglet | experimental/input/usage.py | 28 | 1240 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import input
# (usage_page, usage): Device class
'''
device_usage_map = {
(0x01, 0x01): input.PointerDevice,
(0x01, 0x01): input.MouseDevice,
(0x01, 0x04): input.JoystickDevice,
(0x01, 0x05): input.GamePadDevice,
(0x01, 0x06): input.KeyboardDevice,
(0x01, 0x07): input.KeypadDevice,
(0x01, 0x08): input.MultiAxisControllerDevice,
}
'''
# (usage_page, usage) -> canonical element name; entries are from the HID
# Generic Desktop usage page (0x01).
element_usage_names = {
    (0x01, 0x30): 'x',
    (0x01, 0x31): 'y',
    (0x01, 0x32): 'z',
    (0x01, 0x33): 'rx',
    (0x01, 0x34): 'ry',
    (0x01, 0x35): 'rz',
    (0x01, 0x36): 'slider',
    (0x01, 0x37): 'dial',
    (0x01, 0x38): 'wheel',
    (0x01, 0x39): 'hat_switch',
    (0x01, 0x3d): 'start',
    (0x01, 0x3e): 'select',
}
def get_element_usage_known(usage_page, usage):
    """Return True if (usage_page, usage) is a recognized HID element usage.

    Any positive usage on the Button page (0x09) is known; otherwise the
    pair must appear in ``element_usage_names``.
    """
    if usage_page == 0x09 and usage > 0:
        return True
    return (usage_page, usage) in element_usage_names
def get_element_usage_name(usage_page, usage):
    """Return a human-readable name for a HID element usage.

    Buttons (page 0x09) are named 'buttonN'; known pairs map through
    ``element_usage_names``; anything else falls back to a hex pair.
    """
    if usage_page == 0x09:
        return 'button%d' % usage
    name = element_usage_names.get((usage_page, usage))
    if name is not None:
        return name
    return '(%x, %x)' % (usage_page, usage)
| bsd-3-clause |
ecreall/dace | dace/processinstance/tests/test_signal.py | 1 | 7346 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import transaction
from pyramid.threadlocal import get_current_registry
from dace.interfaces import IProcessDefinition
import dace.processinstance.tests.example.process as example
from dace.processdefinition.processdef import ProcessDefinition
from dace.processdefinition.activitydef import ActivityDefinition
from dace.processdefinition.gatewaydef import ParallelGatewayDefinition
from dace.processdefinition.transitiondef import TransitionDefinition
from dace.processdefinition.eventdef import (
StartEventDefinition,
EndEventDefinition,
IntermediateCatchEventDefinition,
IntermediateThrowEventDefinition,
SignalEventDefinition)
from dace.testing import FunctionalTests
def ref_signal(process):
    """Return the signal reference used by the sample process definition.

    The *process* argument is accepted (the event machinery passes it)
    but the reference is a fixed label.
    """
    return "X"
class TestsSignal(FunctionalTests):
    def tearDown(self):
        # Remove the process-definition utility registered during the test so
        # state does not leak into subsequent tests.
        registry = get_current_registry()
        registry.unregisterUtility(provided=IProcessDefinition)
        super(TestsSignal, self).tearDown()
def _process_definition(self):
"""
G1(+), G2(+): parallel gateways
S: start event
E: End event
St: Signal throwing
Sc: Signal catching
A, D: activities
----- ------
-->| A |-->| St |--
----- --------- / ----- ------ \ --------- -----
| S |-->| G1(+) |- ------ ----- -| G2(+) |-->| E |
----- --------- \-->| Sc |->| D |---/ --------- -----
------ -----
"""
pd = ProcessDefinition(**{'id':u'sample'})
self.app['sample'] = pd
pd.defineNodes(
s = StartEventDefinition(),
g1 = ParallelGatewayDefinition(),
g2 = ParallelGatewayDefinition(),
a = ActivityDefinition(),
d = ActivityDefinition(),
e = EndEventDefinition(),
st = IntermediateThrowEventDefinition(
SignalEventDefinition(ref_signal)),
sc = IntermediateCatchEventDefinition(
SignalEventDefinition(ref_signal)),
)
pd.defineTransitions(
TransitionDefinition('s', 'g1'),
TransitionDefinition('g1', 'a'),
TransitionDefinition('g1', 'sc'),
TransitionDefinition('a', 'st'),
TransitionDefinition('sc', 'd'),
TransitionDefinition('st', 'g2'),
TransitionDefinition('d', 'g2'),
TransitionDefinition('g2', 'e'),
)
self.config.scan(example)
return pd
def xtest_signal_event_start_sc(self):
pd = self._process_definition()
self.def_container.add_definition(pd)
start_wi = pd.start_process('sc')['sc']
sc_wi, proc = start_wi.consume()
sc_wi.start_test_activity()
self.assertEqual(len(proc.getWorkItems()), 2)
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.a', 'sample.sc'])
def xtest_signal_event(self):
pd = self._process_definition()
self.def_container.add_definition(pd)
start_wi = pd.start_process('a')['a']
# commit the application
transaction.commit()
a_wi, proc = start_wi.consume()
a_wi.start_test_activity()
transaction.commit()
import time
time.sleep(5)
transaction.begin()
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.d'])
d_wi = proc.getWorkItems()['sample.d']
self.assertEqual(len(proc.getWorkItems()), 1)
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.d'])
d_wi.consume().start_test_activity()
self.assertEqual(len(proc.getWorkItems()), 0)
def _process_definition_with_activity_after_start_event(self):
"""
G1(+), G2(+): parallel gateways
S: start event
E: End event
St: Signal throwing
Sc: Signal catching
A, B, D: activities
----- ------
-->| A |-->| St |--
----- ----- --------- / ----- ------ \ --------- -----
| S |-->| B |-->| G1(+) |- ------ ----- -| G2(+) |-->| E |
----- ----- --------- \-->| Sc |->| D |---/ --------- -----
------ -----
"""
pd = ProcessDefinition(**{'id':u'sample'})
self.app['sample'] = pd
pd.defineNodes(
s = StartEventDefinition(),
g1 = ParallelGatewayDefinition(),
g2 = ParallelGatewayDefinition(),
a = ActivityDefinition(),
b = ActivityDefinition(),
d = ActivityDefinition(),
e = EndEventDefinition(),
st = IntermediateThrowEventDefinition(
SignalEventDefinition(ref_signal)),
sc = IntermediateCatchEventDefinition(
SignalEventDefinition(ref_signal)),
)
pd.defineTransitions(
TransitionDefinition('s', 'b'),
TransitionDefinition('b', 'g1'),
TransitionDefinition('g1', 'a'),
TransitionDefinition('g1', 'sc'),
TransitionDefinition('a', 'st'),
TransitionDefinition('sc', 'd'),
TransitionDefinition('st', 'g2'),
TransitionDefinition('d', 'g2'),
TransitionDefinition('g2', 'e'),
)
self.config.scan(example)
return pd
def test_start_intermediate_events_on_startup(self):
from zope.processlifetime import DatabaseOpenedWithRoot
from dace.processinstance import event
from dace.subscribers import stop_ioloop
pd = self._process_definition_with_activity_after_start_event()
self.def_container.add_definition(pd)
start_wi = pd.start_process('b')['b']
# commit the application
transaction.commit()
b_wi, proc = start_wi.consume()
b_wi.start_test_activity()
transaction.commit()
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.a', 'sample.sc'])
# simulate application shutdown
import time
# we need to wait ZMQStream to start on ioloop side and read
# the Listener from the socket so we have the listener in
# event.callbacks
time.sleep(2.2)
self.assertEqual(len(event.callbacks), 1)
stop_ioloop()
time.sleep(1)
self.assertEqual(len(event.callbacks), 0)
# simulate application startup
e = DatabaseOpenedWithRoot(self.app._p_jar.db())
self.registry.notify(e)
time.sleep(1)
self.assertEqual(len(event.callbacks), 1)
a_wi = proc.getWorkItems()['sample.a']
a_wi.consume().start_test_activity()
# we need to commit so the catching event Job
# see the modified process.
transaction.commit()
# The job wait 2 sec before executing
time.sleep(5)
transaction.begin()
self.assertEqual(sorted(proc.getWorkItems().keys()), ['sample.d'])
| agpl-3.0 |
jriegel/FreeCAD | src/Mod/TemplatePyMod/PythonQt.py | 56 | 2708 | """
Examples for customizing the FreeCAD application with PySide facilities.
(c) 2007 Werner Mayer LGPL
"""
__author__ = "Werner Mayer <werner.wm.mayer@gmx.de>"
from PySide import QtCore,QtGui
import FreeCAD,FreeCADGui, __main__
class MainWindow:
    """Thin convenience wrapper around the FreeCAD main window."""

    def __init__(self):
        self.app = QtGui.qApp
        self.mw = FreeCADGui.getMainWindow()
        # Maps each created dock widget to the widget it hosts; holding the
        # references keeps the Qt objects alive.
        self.dock = {}

    def setWindowTitle(self, name):
        """Set the title of the FreeCAD main window."""
        self.mw.setWindowTitle(name)

    def addCalendar(self):
        """Dock a calendar widget on the right-hand side of the main window."""
        holder = QtGui.QDockWidget()
        holder.setWindowTitle("Calendar")
        calendar = QtGui.QCalendarWidget()
        holder.setWidget(calendar)
        self.mw.addDockWidget(QtCore.Qt.RightDockWidgetArea, holder)
        self.dock[holder] = calendar

    def information(self, title, text):
        """Show an information message box."""
        QtGui.QMessageBox.information(self.mw, title, text)

    def warning(self, title, text):
        """Show a warning message box."""
        QtGui.QMessageBox.warning(self.mw, title, text)

    def critical(self, title, text):
        """Show a critical-error message box."""
        QtGui.QMessageBox.critical(self.mw, title, text)

    def question(self, title, text):
        """Show a question message box."""
        QtGui.QMessageBox.question(self.mw, title, text)

    def aboutQt(self):
        """Show the standard "About Qt" dialog."""
        QtGui.QMessageBox.aboutQt(self.mw, self.mw.tr("About Qt"))
class PythonQtWorkbench (__main__.Workbench):
    "Python Qt workbench object"

    # Workbench metadata shown in FreeCAD's workbench selector.
    Icon = "python"
    MenuText = "PySide sandbox"
    ToolTip = "Python Qt workbench"

    def __init__(self):
        self.mw = FreeCADGui.getMainWindow()
        # dock widget -> hosted widget; holding references keeps them alive
        self.dock = {}
        # QAction references; must be stored so the signal connections survive
        self.item = []

    def information(self):
        QtGui.QMessageBox.information(self.mw, "Info", "This is an information")

    def warning(self):
        QtGui.QMessageBox.warning(self.mw, "Warning", "This is a warning")

    def critical(self):
        QtGui.QMessageBox.critical(self.mw, "Error", "This is an error")

    def Initialize(self):
        # Build the "Python Qt" menu once and wire each action to one of the
        # message-box helpers above (old-style PySide signal syntax).
        self.menu = QtGui.QMenu()
        self.menu.setTitle("Python Qt")
        self.item.append(self.menu.addAction("Test 1"))
        self.item.append(self.menu.addAction("Test 2"))
        self.item.append(self.menu.addAction("Test 3"))
        QtCore.QObject.connect(self.item[0], QtCore.SIGNAL("triggered()"), self.information)
        QtCore.QObject.connect(self.item[1], QtCore.SIGNAL("triggered()"), self.warning)
        QtCore.QObject.connect(self.item[2], QtCore.SIGNAL("triggered()"), self.critical)

    def Activated(self):
        # Called by FreeCAD when the user switches to this workbench:
        # remember the old title, dock a calendar and insert our menu.
        self.__title__ = self.mw.windowTitle()
        self.mw.setWindowTitle("FreeCAD -- PythonQt")
        d = QtGui.QDockWidget()
        d.setWindowTitle("Calendar")
        c = QtGui.QCalendarWidget()
        d.setWidget(c)
        self.mw.addDockWidget(QtCore.Qt.RightDockWidgetArea,d)
        self.dock[d] = c
        bar = self.mw.menuBar()
        a=bar.actions()
        # Insert our menu before the "&Windows" menu.  NOTE(review): if no
        # action has that object name, `i` is left as the last action and the
        # menu is inserted before it instead — confirm that is intended.
        for i in a:
            if i.objectName() == "&Windows":
                break
        bar.insertMenu(i, self.menu)
        self.menu.setTitle("Python Qt")
        self.menu.menuAction().setVisible(True)

    def Deactivated(self):
        # Restore the original window title and drop our widget references.
        self.mw.setWindowTitle(self.__title__)
        self.dock.clear()

FreeCADGui.addWorkbench(PythonQtWorkbench)
| lgpl-2.1 |
NCI-Cloud/cloud-tools | check-defunct-instances.py | 1 | 1214 | #!/usr/bin/env python
#
# Take a list of instance UUIDs and check their status. If the last activity
# recorded for them is more than six months ago mark them as defunct.
from util import get_nova_client, get_keystone_client
from util import get_instance, is_instance_to_be_expired
from util import output_report
from util import parser_with_common_args
def parse_args():
    """Build and parse the command line arguments.

    Returns the argparse namespace produced by the shared parser (which
    supplies the common options, including the ``hosts`` UUID list),
    extended with the ``--days`` threshold.
    """
    parser = parser_with_common_args()
    # Fixes two small defects: the two implicitly-concatenated help strings
    # had no separating space ("considereddefunct"), and the default was the
    # string '90' rather than the int 90 (argparse converted it via type=int,
    # but an int default is the clear, direct form).
    parser.add_argument("-d", "--days", action='store', required=False,
                        type=int, default=90,
                        help=(
                            "Number of days before an instance is "
                            "considered defunct"
                        ))
    return parser.parse_args()
def main():
    """Check each requested instance and report the defunct ones."""
    args = parse_args()
    nova = get_nova_client()
    keystone = get_keystone_client()
    defunct = []
    for uuid in args.hosts:
        instance = get_instance(nova, uuid)
        if instance is None:
            print("Instance %s not found" % (uuid))
            continue
        # Keep only instances whose last activity is older than the cutoff.
        if is_instance_to_be_expired(nova, instance, days=args.days):
            defunct.append(instance)
    output_report(nova, keystone, defunct)

if __name__ == '__main__':
    main()
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_01_01/operations/_usages_operations.py | 1 | 5335 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
    """UsagesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2021_01_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this class is generated by AutoRest; keep manual edits minimal.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_location(
        self,
        location,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.UsageListResult"]
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.

        :param location: The location of the Azure Storage resource.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsageListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_01_01.models.UsageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UsageListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request; for continuation pages the service's
            # next_link already carries the full query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_location.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'location': self._serialize.url("location", location, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; first element of the returned tuple is the
            # continuation token (always None here: this API is single-page).
            deserialized = self._deserialize('UsageListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'}  # type: ignore
| mit |
helldorado/ansible | lib/ansible/modules/network/cloudengine/ce_vrf_af.py | 25 | 30329 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vrf_af
version_added: "2.4"
short_description: Manages VPN instance address family on HUAWEI CloudEngine switches.
description:
- Manages VPN instance address family of HUAWEI CloudEngine switches.
author: Yang yang (@QijunPan)
notes:
- If I(state=absent), the vrf will be removed, regardless of the
non-required parameters.
options:
vrf:
description:
- VPN instance.
required: true
vrf_aftype:
description:
- VPN instance address family.
choices: ['v4','v6']
default: v4
route_distinguisher:
description:
- VPN instance route distinguisher,the RD used to distinguish same route prefix from different vpn.
The RD must be setted before setting vpn_target_value.
vpn_target_state:
description:
- Manage the state of the vpn target.
choices: ['present','absent']
vpn_target_type:
description:
- VPN instance vpn target type.
choices: ['export_extcommunity', 'import_extcommunity']
vpn_target_value:
description:
- VPN instance target value. Such as X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>
or number<0-65535>.number<0-65535>:number<0-65535> or number<65536-4294967295>:number<0-65535>
but not support 0:0 and 0.0:0.
evpn:
description:
- Is extend vpn or normal vpn.
type: bool
default: 'no'
state:
description:
- Manage the state of the af.
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: vrf af module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Config vpna, set address family is ipv4
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
state: present
provider: "{{ cli }}"
- name: Config vpna, delete address family is ipv4
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
state: absent
provider: "{{ cli }}"
- name: Config vpna, set address family is ipv4,rd=1:1,set vpn_target_type=export_extcommunity,vpn_target_value=2:2
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
route_distinguisher: 1:1
vpn_target_type: export_extcommunity
vpn_target_value: 2:2
vpn_target_state: present
state: present
provider: "{{ cli }}"
- name: Config vpna, set address family is ipv4,rd=1:1,delete vpn_target_type=export_extcommunity,vpn_target_value=2:2
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
route_distinguisher: 1:1
vpn_target_type: export_extcommunity
vpn_target_value: 2:2
vpn_target_state: absent
state: present
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vrf": "vpna",
"vrf_aftype": "v4",
"state": "present",
"vpn_targe_state":"absent",
"evpn": "none",
"vpn_target_type": "none",
"vpn_target_value": "none"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {
"route_distinguisher": [
"1:1",
"2:2"
],
"vpn_target_type": [],
"vpn_target_value": [],
"vrf": "vpna",
"vrf_aftype": [
"ipv4uni",
"ipv6uni"
]
}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {
"route_distinguisher": [
"1:1",
"2:2"
],
"vpn_target_type": [
"import_extcommunity",
"3:3"
],
"vpn_target_value": [],
"vrf": "vpna",
"vrf_aftype": [
"ipv4uni",
"ipv6uni"
]
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"ip vpn-instance vpna",
"vpn-target 3:3 import_extcommunity"
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<vrfDescription></vrfDescription>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_GET_VRF_AF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<vpnInstAFs>
<vpnInstAF>
<afType></afType>
<vrfRD></vrfRD>%s
</vpnInstAF>
</vpnInstAFs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_DELETE_VRF_AF = """
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<vpnInstAFs>
<vpnInstAF operation="delete">
<afType>%s</afType>
</vpnInstAF>
</vpnInstAFs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
"""
CE_NC_CREATE_VRF_AF = """
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<vpnInstAFs>
<vpnInstAF operation="merge">
<afType>%s</afType>
<vrfRD>%s</vrfRD>%s
</vpnInstAF>
</vpnInstAFs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm></l3vpn>
"""
CE_NC_CREATE_VRF_TARGET = """
<vpnTargets>
<vpnTarget operation="merge">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
</vpnTarget>
</vpnTargets>
"""
CE_NC_DELETE_VRF_TARGET = """
<vpnTargets>
<vpnTarget operation="delete">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
</vpnTarget>
</vpnTargets>
"""
CE_NC_GET_VRF_TARGET = """
<vpnTargets>
<vpnTarget>
<vrfRTValue></vrfRTValue>
<vrfRTType></vrfRTType>
</vpnTarget>
</vpnTargets>
"""
CE_NC_CREATE_EXTEND_VRF_TARGET = """
<exVpnTargets>
<exVpnTarget operation="merge">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
<extAddrFamily>evpn</extAddrFamily>
</exVpnTarget>
</exVpnTargets>
"""
CE_NC_DELETE_EXTEND_VRF_TARGET = """
<exVpnTargets>
<exVpnTarget operation="delete">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
<extAddrFamily>evpn</extAddrFamily>
</exVpnTarget>
</exVpnTargets>
"""
CE_NC_GET_EXTEND_VRF_TARGET = """
<exVpnTargets>
<exVpnTarget>
<vrfRTType></vrfRTType>
<vrfRTValue></vrfRTValue>
<extAddrFamily></extAddrFamily>
</exVpnTarget>
</exVpnTargets>
"""
def build_config_xml(xmlstr):
    """Wrap an XML fragment in the NETCONF <config> root element."""
    return '<config> %s </config>' % xmlstr
def is_valid_value(vrf_targe_value):
    """Validate a route distinguisher / VPN target value.

    Accepted forms (total length 3..21 characters):
      * A.B.C.D:N   - dotted IPv4 address plus N in 0..65535
      * A.B:N       - two 16-bit decimal fields plus N in 0..65535
      * X:Y         - X in 0..65535 and Y in 0..4294967295
      * X:Y         - X in 65536..4294967295 and Y in 0..65535
    The reserved values ``0:0`` and ``0.0:0`` are rejected.

    Returns True when the value is well formed, False otherwise.
    """
    if len(vrf_targe_value) > 21 or len(vrf_targe_value) < 3:
        return False
    if vrf_targe_value in ('0:0', '0.0:0'):
        return False
    parts = vrf_targe_value.split(':')
    # Bug fix: the original only checked that a colon was present, so a
    # malformed value such as '1:2:3' was accepted.  Exactly one ':' is valid.
    if len(parts) != 2:
        return False
    prefix, suffix = parts
    if not suffix.isdigit():
        return False
    if '.' in prefix:
        # Dotted prefix forms: the suffix is limited to 16 bits.
        if int(suffix) > 65535:
            return False
        fields = prefix.split('.')
        if len(fields) == 4:
            limit = 255       # IPv4-address form
        elif len(fields) == 2:
            limit = 65535     # two 16-bit integers form
        else:
            return False
        return all(field.isdigit() and int(field) <= limit
                   for field in fields)
    if not prefix.isdigit():
        return False
    prefix_num = int(prefix)
    if prefix_num < 65536:
        return int(suffix) < 4294967296
    if prefix_num < 4294967296:
        return int(suffix) < 65536
    return False
class VrfAf(object):
"""manage the vrf address family and export/import target"""
    def __init__(self, argument_spec, ):
        """Read module parameters and initialise the bookkeeping state."""
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # vpn instance info
        self.vrf = self.module.params['vrf']
        self.vrf_aftype = self.module.params['vrf_aftype']
        # Map the user-facing 'v4'/'v6' choice onto the NETCONF afType
        # keywords the device expects ('ipv4uni'/'ipv6uni').
        if self.vrf_aftype == 'v4':
            self.vrf_aftype = 'ipv4uni'
        else:
            self.vrf_aftype = 'ipv6uni'
        self.route_distinguisher = self.module.params['route_distinguisher']
        self.evpn = self.module.params['evpn']
        self.vpn_target_type = self.module.params['vpn_target_type']
        self.vpn_target_value = self.module.params['vpn_target_value']
        self.vpn_target_state = self.module.params['vpn_target_state']
        self.state = self.module.params['state']

        # state
        self.changed = False           # True once a device change is required
        self.updates_cmd = list()      # CLI-equivalent commands for the report
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
        self.vpn_target_changed = False
        self.vrf_af_type_changed = False
        self.vrf_rd_changed = False
        self.vrf_af_info = dict()      # parsed <vpnInstAF> data from the device
    def init_module(self):
        """Create the AnsibleModule instance from the argument spec."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def is_vrf_af_exist(self):
"""is vrf address family exist"""
if not self.vrf_af_info:
return False
for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
if vrf_af_ele["afType"] == self.vrf_aftype:
return True
else:
continue
return False
def get_exist_rd(self):
"""get exist route distinguisher """
if not self.vrf_af_info:
return None
for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
if vrf_af_ele["afType"] == self.vrf_aftype:
if vrf_af_ele["vrfRD"] is None:
return None
else:
return vrf_af_ele["vrfRD"]
else:
continue
return None
def is_vrf_rd_exist(self):
"""is vrf route distinguisher exist"""
if not self.vrf_af_info:
return False
for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
if vrf_af_ele["afType"] == self.vrf_aftype:
if vrf_af_ele["vrfRD"] is None:
return False
if self.route_distinguisher is not None:
return bool(vrf_af_ele["vrfRD"] == self.route_distinguisher)
else:
return True
else:
continue
return False
def is_vrf_rt_exist(self):
"""is vpn target exist"""
if not self.vrf_af_info:
return False
for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
if vrf_af_ele["afType"] == self.vrf_aftype:
if self.evpn is False:
if not vrf_af_ele.get("vpnTargets"):
return False
for vpn_target in vrf_af_ele.get("vpnTargets"):
if vpn_target["vrfRTType"] == self.vpn_target_type \
and vpn_target["vrfRTValue"] == self.vpn_target_value:
return True
else:
continue
else:
if not vrf_af_ele.get("evpnTargets"):
return False
for evpn_target in vrf_af_ele.get("evpnTargets"):
if evpn_target["vrfRTType"] == self.vpn_target_type \
and evpn_target["vrfRTValue"] == self.vpn_target_value:
return True
else:
continue
else:
continue
return False
    def set_update_cmd(self):
        """Build the CLI-equivalent command list reported to the user.

        Only runs when a change was actually made; the commands mirror the
        NETCONF operations performed by operate_vrf_af().
        """
        if not self.changed:
            return
        if self.state == "present":
            self.updates_cmd.append('ip vpn-instance %s' % (self.vrf))
            if self.vrf_aftype == 'ipv4uni':
                self.updates_cmd.append('ipv4-family')
            elif self.vrf_aftype == 'ipv6uni':
                self.updates_cmd.append('ipv6-family')
            if self.route_distinguisher:
                # An RD was requested: emit it only if not already present.
                if not self.is_vrf_rd_exist():
                    self.updates_cmd.append(
                        'route-distinguisher %s' % self.route_distinguisher)
            else:
                # No RD requested: remove whatever RD is currently configured.
                if self.get_exist_rd() is not None:
                    self.updates_cmd.append(
                        'undo route-distinguisher %s' % self.get_exist_rd())
            if self.vpn_target_state == "present":
                if not self.is_vrf_rt_exist():
                    if self.evpn is False:
                        self.updates_cmd.append(
                            'vpn-target %s %s' % (self.vpn_target_value, self.vpn_target_type))
                    else:
                        self.updates_cmd.append(
                            'vpn-target %s %s evpn' % (self.vpn_target_value, self.vpn_target_type))
            elif self.vpn_target_state == "absent":
                if self.is_vrf_rt_exist():
                    if self.evpn is False:
                        self.updates_cmd.append(
                            'undo vpn-target %s %s' % (self.vpn_target_value, self.vpn_target_type))
                    else:
                        self.updates_cmd.append(
                            'undo vpn-target %s %s evpn' % (self.vpn_target_value, self.vpn_target_type))
        else:
            # state == 'absent': removing the whole address family.
            self.updates_cmd.append('ip vpn-instance %s' % (self.vrf))
            if self.vrf_aftype == 'ipv4uni':
                self.updates_cmd.append('undo ipv4-family')
            elif self.vrf_aftype == 'ipv6uni':
                self.updates_cmd.append('undo ipv6-family')
def get_vrf(self):
""" check if vrf is need to change"""
getxmlstr = CE_NC_GET_VRF
xmlstr_new_1 = (self.vrf.lower())
xml_str = get_nc_config(self.module, getxmlstr)
re_find_1 = re.findall(
r'.*<vrfname>(.*)</vrfname>.*', xml_str.lower())
if re_find_1 is None:
return False
return xmlstr_new_1 in re_find_1
    def get_vrf_af(self):
        """Fetch the VRF's address-family data and cache it in self.vrf_af_info.

        Populates self.vrf_af_info["vpnInstAF"] with one dict per <vpnInstAF>
        element (keys: vrfName/afType/vrfRD plus vpnTargets/evpnTargets lists).
        """
        self.vrf_af_info["vpnInstAF"] = list()
        # EVPN targets are requested through a different sub-filter.
        if self.evpn is True:
            getxmlstr = CE_NC_GET_VRF_AF % (
                self.vrf, CE_NC_GET_EXTEND_VRF_TARGET)
        else:
            getxmlstr = CE_NC_GET_VRF_AF % (self.vrf, CE_NC_GET_VRF_TARGET)

        xml_str = get_nc_config(self.module, getxmlstr)

        # An empty <data/> reply means nothing is configured.
        # NOTE(review): the return value is not used by the callers visible
        # here; presumably only the early exit matters — confirm.
        if 'data/' in xml_str:
            return self.state == 'present'

        # Strip namespaces so ElementTree paths below stay simple.
        xml_str = xml_str.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)

        # get the vpn address family and RD text
        vrf_addr_types = root.findall(
            "data/l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance/vpnInstAFs/vpnInstAF")
        if vrf_addr_types:
            for vrf_addr_type in vrf_addr_types:
                vrf_af_info = dict()
                for vrf_addr_type_ele in vrf_addr_type:
                    if vrf_addr_type_ele.tag in ["vrfName", "afType", "vrfRD"]:
                        vrf_af_info[vrf_addr_type_ele.tag] = vrf_addr_type_ele.text
                    if vrf_addr_type_ele.tag == 'vpnTargets':
                        # One dict (type + value) per <vpnTarget> child.
                        vrf_af_info["vpnTargets"] = list()
                        for rtargets in vrf_addr_type_ele:
                            rt_dict = dict()
                            for rtarget in rtargets:
                                if rtarget.tag in ["vrfRTValue", "vrfRTType"]:
                                    rt_dict[rtarget.tag] = rtarget.text
                            vrf_af_info["vpnTargets"].append(rt_dict)
                    if vrf_addr_type_ele.tag == 'exVpnTargets':
                        # EVPN targets arrive as <exVpnTarget> children.
                        vrf_af_info["evpnTargets"] = list()
                        for rtargets in vrf_addr_type_ele:
                            rt_dict = dict()
                            for rtarget in rtargets:
                                if rtarget.tag in ["vrfRTValue", "vrfRTType"]:
                                    rt_dict[rtarget.tag] = rtarget.text
                            vrf_af_info["evpnTargets"].append(rt_dict)
                self.vrf_af_info["vpnInstAF"].append(vrf_af_info)
    def check_params(self):
        """Validate module parameters; fail_json() on any violation."""
        # vrf and description check
        if self.vrf == '_public_':
            self.module.fail_json(
                msg='Error: The vrf name _public_ is reserved.')
        # The VPN instance itself must already exist on the device.
        if not self.get_vrf():
            self.module.fail_json(
                msg='Error: The vrf name do not exist.')

        if self.state == 'present':
            if self.route_distinguisher:
                if not is_valid_value(self.route_distinguisher):
                    self.module.fail_json(msg='Error:The vrf route distinguisher length must between 3 ~ 21,'
                                              'i.e. X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>'
                                              'or number<0-65535>.number<0-65535>:number<0-65535>'
                                              'or number<65536-4294967295>:number<0-65535>'
                                              ' but not be 0:0 or 0.0:0.')
            # vpn_target_type/value are only meaningful together with
            # vpn_target_state; enforce that all three travel as a group.
            if not self.vpn_target_state:
                if self.vpn_target_value or self.vpn_target_type:
                    self.module.fail_json(
                        msg='Error: The vpn target state should be exist.')
            if self.vpn_target_state:
                if not self.vpn_target_value or not self.vpn_target_type:
                    self.module.fail_json(
                        msg='Error: The vpn target value and type should be exist.')
            if self.vpn_target_value:
                if not is_valid_value(self.vpn_target_value):
                    self.module.fail_json(msg='Error:The vrf target value length must between 3 ~ 21,'
                                              'i.e. X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>'
                                              'or number<0-65535>.number<0-65535>:number<0-65535>'
                                              'or number<65536-4294967295>:number<0-65535>'
                                              ' but not be 0:0 or 0.0:0.')
    def operate_vrf_af(self):
        """Apply the requested AF/RD/target change to the device.

        Decides which NETCONF payload (create/merge or delete) is required,
        records which aspect changed in the *_changed flags, and sends the
        configuration only when something actually differs.
        """
        vrf_target_operate = ''
        if self.route_distinguisher is None:
            route_d = ''
        else:
            route_d = self.route_distinguisher

        if self.state == 'present':
            if self.vrf_aftype:
                # Creating the address family itself (merge is idempotent).
                if self.is_vrf_af_exist():
                    self.vrf_af_type_changed = False
                else:
                    self.vrf_af_type_changed = True
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
            else:
                self.vrf_af_type_changed = bool(self.is_vrf_af_exist())
            if self.vpn_target_state == 'present':
                # Add the VPN target (plain or EVPN variant) if missing.
                if self.evpn is False and not self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_CREATE_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
                if self.evpn is True and not self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_CREATE_EXTEND_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
            elif self.vpn_target_state == 'absent':
                # Remove the VPN target (plain or EVPN variant) if present.
                if self.evpn is False and self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_DELETE_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
                if self.evpn is True and self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_DELETE_EXTEND_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
            else:
                # No target operation requested: only the RD may change.
                if self.route_distinguisher:
                    if not self.is_vrf_rd_exist():
                        configxmlstr = CE_NC_CREATE_VRF_AF % (
                            self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                        self.vrf_rd_changed = True
                    else:
                        self.vrf_rd_changed = False
                else:
                    if self.is_vrf_rd_exist():
                        configxmlstr = CE_NC_CREATE_VRF_AF % (
                            self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                        self.vrf_rd_changed = True
                    else:
                        self.vrf_rd_changed = False
            if not self.vrf_rd_changed and not self.vrf_af_type_changed and not self.vpn_target_changed:
                self.changed = False
            else:
                self.changed = True
        else:
            # state == 'absent': delete the whole address family if present.
            if self.is_vrf_af_exist():
                configxmlstr = CE_NC_DELETE_VRF_AF % (
                    self.vrf, self.vrf_aftype)
                self.changed = True
            else:
                self.changed = False

        if not self.changed:
            return

        conf_str = build_config_xml(configxmlstr)
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "OPERATE_VRF_AF")
    def get_proposed(self):
        """Populate self.proposed from the module parameters for the report."""
        if self.state == 'present':
            self.proposed['vrf'] = self.vrf
            # NOTE(review): __init__ maps vrf_aftype to 'ipv4uni'/'ipv6uni',
            # so the None fallback below looks unreachable — confirm.
            if self.vrf_aftype is None:
                self.proposed['vrf_aftype'] = 'ipv4uni'
            else:
                self.proposed['vrf_aftype'] = self.vrf_aftype
            if self.route_distinguisher is not None:
                self.proposed['route_distinguisher'] = self.route_distinguisher
            else:
                self.proposed['route_distinguisher'] = list()
            if self.vpn_target_state == 'present':
                self.proposed['evpn'] = self.evpn
                self.proposed['vpn_target_type'] = self.vpn_target_type
                self.proposed['vpn_target_value'] = self.vpn_target_value
            else:
                self.proposed['vpn_target_type'] = list()
                self.proposed['vpn_target_value'] = list()
        else:
            # For 'absent' the proposal only names what is being removed;
            # empty lists mirror the "nothing configured" representation.
            self.proposed = dict()
            self.proposed['state'] = self.state
            self.proposed['vrf'] = self.vrf
            self.proposed['vrf_aftype'] = list()
            self.proposed['route_distinguisher'] = list()
            self.proposed['vpn_target_value'] = list()
            self.proposed['vpn_target_type'] = list()
    def get_existing(self):
        """Read the device's current VRF AF state into ``self.existing``.

        Queries the device via ``get_vrf_af()`` and flattens each address
        family's RD and (e)VPN targets into parallel lists.
        """
        self.get_vrf_af()
        self.existing['vrf'] = self.vrf
        self.existing['vrf_aftype'] = list()
        self.existing['route_distinguisher'] = list()
        self.existing['vpn_target_value'] = list()
        self.existing['vpn_target_type'] = list()
        self.existing['evpn_target_value'] = list()
        self.existing['evpn_target_type'] = list()
        if self.vrf_af_info["vpnInstAF"] is None:
            # No address family configured on the device for this VRF.
            return
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            self.existing['vrf_aftype'].append(vrf_af_ele["afType"])
            self.existing['route_distinguisher'].append(
                vrf_af_ele["vrfRD"])
            if vrf_af_ele.get("vpnTargets"):
                for vpn_target in vrf_af_ele.get("vpnTargets"):
                    self.existing['vpn_target_type'].append(
                        vpn_target["vrfRTType"])
                    self.existing['vpn_target_value'].append(
                        vpn_target["vrfRTValue"])
            if vrf_af_ele.get("evpnTargets"):
                for evpn_target in vrf_af_ele.get("evpnTargets"):
                    self.existing['evpn_target_type'].append(
                        evpn_target["vrfRTType"])
                    self.existing['evpn_target_value'].append(
                        evpn_target["vrfRTValue"])
    def get_end_state(self):
        """Re-read the device state after changes into ``self.end_state``.

        Mirrors ``get_existing()`` exactly so that existing/end_state can
        be compared field-for-field in the module result.
        """
        self.get_vrf_af()
        self.end_state['vrf'] = self.vrf
        self.end_state['vrf_aftype'] = list()
        self.end_state['route_distinguisher'] = list()
        self.end_state['vpn_target_value'] = list()
        self.end_state['vpn_target_type'] = list()
        self.end_state['evpn_target_value'] = list()
        self.end_state['evpn_target_type'] = list()
        if self.vrf_af_info["vpnInstAF"] is None:
            return
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            self.end_state['vrf_aftype'].append(vrf_af_ele["afType"])
            self.end_state['route_distinguisher'].append(vrf_af_ele["vrfRD"])
            if vrf_af_ele.get("vpnTargets"):
                for vpn_target in vrf_af_ele.get("vpnTargets"):
                    self.end_state['vpn_target_type'].append(
                        vpn_target["vrfRTType"])
                    self.end_state['vpn_target_value'].append(
                        vpn_target["vrfRTValue"])
            if vrf_af_ele.get("evpnTargets"):
                for evpn_target in vrf_af_ele.get("evpnTargets"):
                    self.end_state['evpn_target_type'].append(
                        evpn_target["vrfRTType"])
                    self.end_state['evpn_target_value'].append(
                        evpn_target["vrfRTValue"])
    def work(self):
        """Module entry point: validate, apply changes, report, and exit.

        Gathers existing/proposed/end state around the actual device
        operation so the Ansible result shows a full before/after picture.
        """
        self.check_params()
        self.get_existing()
        self.get_proposed()
        # Pushes the NETCONF config to the device and sets self.changed.
        self.operate_vrf_af()
        self.set_update_cmd()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        # exit_json never returns.
        self.module.exit_json(**self.results)
def main():
    """Build the Ansible argument spec and run the VRF AF worker."""
    argument_spec = dict(
        vrf=dict(required=True, type='str'),
        # 'v4'/'v6' are user-facing aliases for the device AF types.
        vrf_aftype=dict(choices=['v4', 'v6'],
                        default='v4', required=False),
        route_distinguisher=dict(required=False, type='str'),
        evpn=dict(type='bool', default=False),
        vpn_target_type=dict(
            choices=['export_extcommunity', 'import_extcommunity'], required=False),
        vpn_target_value=dict(required=False, type='str'),
        vpn_target_state=dict(choices=['absent', 'present'], required=False),
        state=dict(choices=['absent', 'present'],
                   default='present', required=False),
    )
    # Add the CloudEngine connection/auth options shared by all ce_* modules.
    argument_spec.update(ce_argument_spec)
    # NOTE(review): despite the name, this is the VrfAf module object,
    # not a network interface.
    interface = VrfAf(argument_spec)
    interface.work()
if __name__ == '__main__':
    main()
| gpl-3.0 |
vmindru/ansible | lib/ansible/plugins/doc_fragments/infinibox.py | 210 | 1564 | #
# (c) 2016, Gregory Shulov <gregory.shulov@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard Infinibox documentation fragment, merged into the docs of
    # every module that declares `extends_documentation_fragment: infinibox`.
    # The string is YAML consumed by Ansible's doc tooling.
    DOCUMENTATION = '''
options:
  system:
    description:
      - Infinibox Hostname or IPv4 Address.
    required: true
  user:
    description:
      - Infinibox User username with sufficient privileges ( see notes ).
    required: false
  password:
    description:
      - Infinibox User password.
    required: false
notes:
  - This module requires infinisdk python library
  - You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables
    if user and password arguments are not passed to the module directly
  - Ansible uses the infinisdk configuration file C(~/.infinidat/infinisdk.ini) if no credentials are provided.
    See U(http://infinisdk.readthedocs.io/en/latest/getting_started.html)
requirements:
  - "python >= 2.7"
  - infinisdk
'''
| gpl-3.0 |
fusionbox/django-extensions | django_extensions/management/commands/find_template.py | 18 | 1114 | from django.core.management.base import LabelCommand
from django.template import loader
from django.template import TemplateDoesNotExist
import sys
def get_template_path(path):
    """Return the filesystem origin of the template named *path*, or
    ``None`` when no configured template loader can locate it."""
    try:
        template = loader.find_template(path)
        if template[1]:
            return template[1].name
        # workaround for https://code.djangoproject.com/ticket/17199 issue:
        # some loaders return no origin, so ask each source loader directly.
        for template_loader in loader.template_source_loaders:
            try:
                source, origin = template_loader.load_template_source(path)
                return origin
            except TemplateDoesNotExist:
                pass
        raise TemplateDoesNotExist(path)
    except TemplateDoesNotExist:
        return None
class Command(LabelCommand):
    # Management command: ./manage.py find_template <template_path>
    help = "Finds the location of the given template by resolving its path"
    args = "[template_path]"
    label = 'template path'
    def handle_label(self, template_path, **options):
        """Print the resolved path of *template_path*; exit(1) if absent."""
        path = get_template_path(template_path)
        if path is None:
            sys.stderr.write("No template found\n")
            sys.exit(1)
        else:
            # Python 2 print statement: this module predates Python 3 support.
            print path
| mit |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/schema.py | 187 | 4850 | from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
class PostGISSchemaEditor(DatabaseSchemaEditor):
    """Schema editor handling PostGIS geometry/geography columns.

    Geography columns — and geometry columns on PostGIS 2.0+ — are plain
    columns; on older PostGIS, geometry columns must be managed through
    the AddGeometryColumn/DropGeometryColumn stored procedures, so their
    SQL is accumulated in ``self.geometry_sql`` and flushed after the
    table exists.
    """
    geom_index_type = 'GIST'
    geom_index_ops = 'GIST_GEOMETRY_OPS'
    geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
    sql_add_geometry_column = "SELECT AddGeometryColumn(%(table)s, %(column)s, %(srid)s, %(geom_type)s, %(dim)s)"
    sql_drop_geometry_column = "SELECT DropGeometryColumn(%(table)s, %(column)s)"
    sql_alter_geometry_column_not_null = "ALTER TABLE %(table)s ALTER COLUMN %(column)s SET NOT NULL"
    sql_add_spatial_index = "CREATE INDEX %(index)s ON %(table)s USING %(index_type)s (%(column)s %(ops)s)"
    sql_clear_geometry_columns = "DELETE FROM geometry_columns WHERE f_table_name = %(table)s"
    def __init__(self, *args, **kwargs):
        super(PostGISSchemaEditor, self).__init__(*args, **kwargs)
        # Deferred geometry-column/index SQL, executed after table creation.
        self.geometry_sql = []
    def geo_quote_name(self, name):
        """Quote *name* as a geo (string) literal, not as an identifier."""
        return self.connection.ops.geo_quote_name(name)
    def column_sql(self, model, field, include_default=False):
        """Return column SQL; queue stored-procedure SQL for old PostGIS."""
        from django.contrib.gis.db.models.fields import GeometryField
        if not isinstance(field, GeometryField):
            return super(PostGISSchemaEditor, self).column_sql(model, field, include_default)
        if field.geography or self.connection.ops.geometry:
            # Geography and Geometry (PostGIS 2.0+) columns are
            # created normally.
            column_sql = super(PostGISSchemaEditor, self).column_sql(model, field, include_default)
        else:
            # No inline column definition: (None, None) tells the caller
            # to omit the column from CREATE TABLE.
            column_sql = None, None
            # Geometry columns are created by the `AddGeometryColumn`
            # stored procedure.
            self.geometry_sql.append(
                self.sql_add_geometry_column % {
                    "table": self.geo_quote_name(model._meta.db_table),
                    "column": self.geo_quote_name(field.column),
                    "srid": field.srid,
                    "geom_type": self.geo_quote_name(field.geom_type),
                    "dim": field.dim,
                }
            )
            if not field.null:
                # AddGeometryColumn creates nullable columns; tighten it.
                self.geometry_sql.append(
                    self.sql_alter_geometry_column_not_null % {
                        "table": self.quote_name(model._meta.db_table),
                        "column": self.quote_name(field.column),
                    }
                )
        if field.spatial_index:
            # Spatial indexes created the same way for both Geometry and
            # Geography columns.
            # PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
            # we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
            # which are fast on multidimensional cases, or just plain
            # gist index for the 2d case.
            if field.geography:
                index_ops = ''
            elif self.connection.ops.geometry:
                if field.dim > 2:
                    index_ops = self.geom_index_ops_nd
                else:
                    index_ops = ''
            else:
                index_ops = self.geom_index_ops
            self.geometry_sql.append(
                self.sql_add_spatial_index % {
                    "index": self.quote_name('%s_%s_id' % (model._meta.db_table, field.column)),
                    "table": self.quote_name(model._meta.db_table),
                    "column": self.quote_name(field.column),
                    "index_type": self.geom_index_type,
                    "ops": index_ops,
                }
            )
        return column_sql
    def create_model(self, model):
        """Create the table, then flush queued geometry-column SQL."""
        super(PostGISSchemaEditor, self).create_model(model)
        # Create geometry columns
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []
    def delete_model(self, model):
        """Drop the table and purge its rows from geometry_columns."""
        super(PostGISSchemaEditor, self).delete_model(model)
        self.execute(self.sql_clear_geometry_columns % {
            "table": self.geo_quote_name(model._meta.db_table),
        })
    def add_field(self, model, field):
        """Add a field, then flush queued geometry-column SQL."""
        super(PostGISSchemaEditor, self).add_field(model, field)
        # Create geometry columns
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []
    def remove_field(self, model, field):
        """Remove a field; use DropGeometryColumn on pre-2.0 geometry."""
        from django.contrib.gis.db.models.fields import GeometryField
        if not isinstance(field, GeometryField) or \
                self.connection.ops.spatial_version > (2, 0) or \
                field.geography:
            super(PostGISSchemaEditor, self).remove_field(model, field)
        else:
            self.execute(
                self.sql_drop_geometry_column % {
                    "table": self.geo_quote_name(model._meta.db_table),
                    "column": self.geo_quote_name(field.column),
                }
            )
| gpl-2.0 |
unix1986/scons | engine/SCons/Tool/GettextCommon.py | 9 | 18115 | """SCons.Tool.GettextCommon module
Used by several tools of `gettext` toolset.
"""
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py 2014/09/27 12:51:43 garyo"
import SCons.Warnings
import re
#############################################################################
# Warning hierarchy raised when a gettext tool binary cannot be found:
# each tool has a ToolWarning base plus a more specific NotFound subclass.
class XgettextToolWarning(SCons.Warnings.Warning): pass
class XgettextNotFound(XgettextToolWarning): pass
class MsginitToolWarning(SCons.Warnings.Warning): pass
class MsginitNotFound(MsginitToolWarning): pass
class MsgmergeToolWarning(SCons.Warnings.Warning): pass
class MsgmergeNotFound(MsgmergeToolWarning): pass
class MsgfmtToolWarning(SCons.Warnings.Warning): pass
class MsgfmtNotFound(MsgfmtToolWarning): pass
#############################################################################
# Register the classes so SCons will actually emit these warnings.
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
  """ A factory of `PO` target files.
  Factory defaults differ from those of `SCons.Node.FS.FS`. We set `precious`
  (this is required by builders and actions gettext) and `noclean` flags by
  default for all produced nodes.
  """
  def __init__( self, env, nodefault = True, alias = None, precious = True
              , noclean = True ):
    """ Object constructor.
    **Arguments**
        - *env* (`SCons.Environment.Environment`)
        - *nodefault* (`boolean`) - if `True`, produced nodes will be ignored
          from default target `'.'`
        - *alias* (`string`) - if provided, produced nodes will be automatically
          added to this alias, and alias will be set as `AlwaysBuild`
        - *precious* (`boolean`) - if `True`, the produced nodes will be set as
          `Precious`.
        - *noclean* (`boolean`) - if `True`, the produced nodes will be excluded
          from `Clean`.
    """
    self.env = env
    self.alias = alias
    self.precious = precious
    self.noclean = noclean
    self.nodefault = nodefault
  def _create_node(self, name, factory, directory = None, create = 1):
    """ Create node, and set it up to factory settings. """
    import SCons.Util
    node = factory(name, directory, create)
    node.set_noclean(self.noclean)
    node.set_precious(self.precious)
    if self.nodefault:
      self.env.Ignore('.', node)
    if self.alias:
      self.env.AlwaysBuild(self.env.Alias(self.alias, node))
    return node
  def Entry(self, name, directory = None, create = 1):
    """ Create `SCons.Node.FS.Entry` """
    return self._create_node(name, self.env.fs.Entry, directory, create)
  def File(self, name, directory = None, create = 1):
    """ Create `SCons.Node.FS.File` """
    return self._create_node(name, self.env.fs.File, directory, create)
#############################################################################
#############################################################################
# Matches a '#' comment up to end-of-line in a LINGUAS file (re.M: per line).
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)
# Matches one language identifier token, e.g. 'en' or 'pt_BR'.
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)
#############################################################################
def _read_linguas_from_files(env, linguas_files = None):
  """ Parse `LINGUAS` file(s) and return the list of extracted languages.

  *linguas_files* may be a file name, a list of names, a node, ``None``
  (returns ``[]``), or any other truthy value (e.g. ``True``), which is
  taken to mean the conventional file name ``'LINGUAS'``.
  """
  import SCons.Util
  import SCons.Environment
  global _re_comment
  global _re_lang
  # Truthy but neither list, string nor node: fall back to 'LINGUAS'.
  if not SCons.Util.is_List(linguas_files) \
    and not SCons.Util.is_String(linguas_files) \
    and not isinstance(linguas_files, SCons.Node.FS.Base) \
    and linguas_files:
    # If, linguas_files==True or such, then read 'LINGUAS' file.
    linguas_files = [ 'LINGUAS' ]
  if linguas_files is None:
    return []
  fnodes = env.arg2nodes(linguas_files)
  linguas = []
  for fnode in fnodes:
    # Strip comments first, then pick out every language token.
    contents = _re_comment.sub("", fnode.get_text_contents())
    ls = [ l for l in _re_lang.findall(contents) if l ]
    linguas.extend(ls)
  return linguas
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
  """ `PO` file builder.

  This is multi-target single-source builder. In typical situation the source
  is single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
  targets to be updated from this `POT`. We must run
  `SCons.Builder.BuilderBase._execute()` separately for each target to track
  dependencies separately for each target file.

  **NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
  with target being list of all targets, all targets would be rebuilt each time
  one of the targets from this list is missing. This would happen, for example,
  when new language `ll` enters `LINGUAS_FILE` (at this moment there is no
  `ll.po` file yet). To avoid this, we override
  `SCons.Builder.BuilderBase._execute()` and call it separately for each
  target. Here we also append to the target list the languages read from
  `LINGUAS_FILE`.
  """
  #
  #* The argument for overriding _execute(): We must use environment with
  # builder overrides applied (see BuilderBase.__init__(). Here it comes for
  # free.
  #* The argument against using 'emitter': The emitter is called too late
  # by BuilderBase._execute(). If user calls, for example:
  #
  # env.POUpdate(LINGUAS_FILE = 'LINGUAS')
  #
  # the builder throws error, because it is called with target=None,
  # source=None and is trying to "generate" sources or target list first.
  # If user calls
  #
  # env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
  #
  # the env.BuilderWrapper() calls our builder with target=None,
  # source=['foo', 'baz']. The BuilderBase._execute() then splits execution
  # and execute iteratively (recursion) self._execute(None, source[i]).
  # After that it calls emitter (which is quite too late). The emitter is
  # also called in each iteration, what makes things yet worse.
  def __init__(self, env, **kw):
    # Fill in gettext-specific defaults only where the caller gave none.
    if not 'suffix' in kw:
      kw['suffix'] = '$POSUFFIX'
    if not 'src_suffix' in kw:
      kw['src_suffix'] = '$POTSUFFIX'
    if not 'src_builder' in kw:
      kw['src_builder'] = '_POTUpdateBuilder'
    if not 'single_source' in kw:
      kw['single_source'] = True
    alias = None
    if 'target_alias' in kw:
      alias = kw['target_alias']
      del kw['target_alias']
    if not 'target_factory' in kw:
      kw['target_factory'] = _POTargetFactory(env, alias=alias).File
    BuilderBase.__init__(self, **kw)
  def _execute(self, env, target, source, *args, **kw):
    """ Execute builder's actions.

    Here we append to `target` the languages read from `$LINGUAS_FILE` and
    apply `SCons.Builder.BuilderBase._execute()` separately to each target.
    The arguments and return value are same as for
    `SCons.Builder.BuilderBase._execute()`.
    """
    import SCons.Util
    import SCons.Node
    linguas_files = None
    if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE']:
      linguas_files = env['LINGUAS_FILE']
      # This prevents endless recursion loop (we'll be invoked once for
      # each target appended here, we must not extend the list again).
      env['LINGUAS_FILE'] = None
      linguas = _read_linguas_from_files(env,linguas_files)
      if SCons.Util.is_List(target):
        target.extend(linguas)
      elif target is not None:
        target = [target] + linguas
      else:
        target = linguas
    if not target:
      # Let the SCons.BuilderBase to handle this pathological situation
      return BuilderBase._execute( self, env, target, source, *args, **kw)
    # The rest is ours
    if not SCons.Util.is_List(target):
      target = [ target ]
    result = []
    for tgt in target:
      # One _execute() per target so each PO tracks its own dependencies.
      r = BuilderBase._execute( self, env, [tgt], source, *args, **kw)
      result.extend(r)
    if linguas_files is not None:
      # Restore the setting consumed above for subsequent invocations.
      env['LINGUAS_FILE'] = linguas_files
    return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=None, source=SCons.Environment._null, *args, **kw):
  """ Implementation of the `Translate()` pseudo-builder: refresh the POT
  template from *source*, then update the PO *target* files from it. """
  pot_nodes = env.POTUpdate(None, source, *args, **kw)
  po_targets = [] if target is None else target
  return env.POUpdate(po_targets, pot_nodes, *args, **kw)
#############################################################################
#############################################################################
class RPaths(object):
  """ Callable object, which returns pathnames relative to SCons current
  working directory.

  It seems like `SCons.Node.FS.Base.get_path()` returns absolute paths
  for nodes that are outside of current working directory (`env.fs.getcwd()`).
  Here, we often have `SConscript`, `POT` and `PO` files within `po/`
  directory and source files (e.g. `*.c`) outside of it. When generating `POT`
  template file, references to source files are written to `POT` template, so
  a translator may later quickly jump to appropriate source file and line from
  its `PO` editor (e.g. `poedit`). Relative paths in `PO` file are usually
  interpreted by `PO` editor as paths relative to the place, where `PO` file
  lives. The absolute paths would make resultant `POT` file nonportable, as
  the references would be correct only on the machine, where `POT` file was
  recently re-created. For such reason, we need a function, which always
  returns relative paths. This is the purpose of `RPaths` callable object.

  The `__call__` method returns paths relative to current working directory, but
  we assume, that *xgettext(1)* is run from the directory, where target file is
  going to be created.

  Note, that this may not work for files distributed over several hosts or
  across different drives on windows. We assume here, that single local
  filesystem holds both source files and target `POT` templates.

  Intended use of `RPaths` - in `xgettext.py`::

    def generate(env):
      from GettextCommon import RPaths
      ...
      sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
      env.Append(
        ...
        XGETTEXTCOM = 'XGETTEXT ... ' + sources,
        ...
        XgettextRPaths = RPaths(env)
      )
  """
  # NOTE: This callable object returns pathnames of dirs/files relative to
  # current working directory. The pathname remains relative also for entries
  # that are outside of current working directory (node, that
  # SCons.Node.FS.File and siblings return absolute path in such case). For
  # simplicity we compute path relative to current working directory, this
  # seems be enough for our purposes (don't need TARGET variable and
  # SCons.Defaults.Variable_Caller stuff).
  def __init__(self, env):
    """ Initialize `RPaths` callable object.

      **Arguments**:

        - *env* - a `SCons.Environment.Environment` object, defines *current
          working dir*.
    """
    self.env = env
  # FIXME: I'm not sure, how it should be implemented (what the *args are in
  # general, what is **kw).
  def __call__(self, nodes, *args, **kw):
    """ Return nodes' paths (strings) relative to current working directory.

      **Arguments**:

        - *nodes* ([`SCons.Node.FS.Base`]) - list of nodes.
        - *args* - currently unused.
        - *kw* - currently unused.

      **Returns**:

        - Tuple of strings, which represent paths relative to current working
          directory (for given environment).
    """
    # os.path.relpath is available only on python >= 2.6. We use our own
    # implementation. It's taken from BareNecessities package:
    # http://jimmyg.org/work/code/barenecessities/index.html
    from posixpath import curdir
    def relpath(path, start=curdir):
      """Return a relative version of a path"""
      import posixpath
      if not path:
        raise ValueError("no path specified")
      start_list = posixpath.abspath(start).split(posixpath.sep)
      path_list = posixpath.abspath(path).split(posixpath.sep)
      # Work out how much of the filepath is shared by start and path.
      i = len(posixpath.commonprefix([start_list, path_list]))
      rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
      if not rel_list:
        return posixpath.curdir
      return posixpath.join(*rel_list)
    import os
    import SCons.Node.FS
    rpaths = ()
    cwd = self.env.fs.getcwd().get_abspath()
    for node in nodes:
      rpath = None
      if isinstance(node, SCons.Node.FS.Base):
        rpath = relpath(node.get_abspath(), cwd)
      # FIXME: Other types possible here?
      if rpath is not None:
        rpaths += (rpath,)
    return rpaths
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
  """ Action function for `POInit` builder.

  Creates missing PO files with msginit when `$POAUTOINIT` is set;
  otherwise just prints a hint telling the translator how to do it.
  """
  nop = lambda target, source, env : 0
  if env.has_key('POAUTOINIT'):
    autoinit = env['POAUTOINIT']
  else:
    autoinit = False
  # Well, if everything outside works well, this loop should do single
  # iteration. Otherwise we are rebuilding all the targets even, if just
  # one has changed (but is this our fault?).
  for tgt in target:
    if not tgt.exists():
      if autoinit:
        action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
      else:
        msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
            + 'If you are a translator, you can create it through: \n' \
            + '$MSGINITCOM'
        # nop action: only display the message, do not touch the file.
        action = SCons.Action.Action(nop, msg)
      status = action([tgt], source, env)
      if status: return status
  return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
""" Detects *xgettext(1)* binary """
if env.has_key('XGETTEXT'):
return env['XGETTEXT']
xgettext = env.Detect('xgettext');
if xgettext:
return xgettext
raise SCons.Errors.StopError(XgettextNotFound,"Could not detect xgettext")
return None
#############################################################################
def _xgettext_exists(env):
return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if env.has_key('MSGINIT'):
return env['MSGINIT']
msginit = env.Detect('msginit');
if msginit:
return msginit
raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
return None
#############################################################################
def _msginit_exists(env):
return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
""" Detects *msgmerge(1)* program. """
if env.has_key('MSGMERGE'):
return env['MSGMERGE']
msgmerge = env.Detect('msgmerge');
if msgmerge:
return msgmerge
raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
return None
#############################################################################
def _msgmerge_exists(env):
return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
if env.has_key('MSGFMT'):
return env['MSGFMT']
msgfmt = env.Detect('msgfmt');
if msgfmt:
return msgfmt
raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
return None
#############################################################################
def _msgfmt_exists(env):
return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
  """ Names of the sub-tools generated by the top-level `gettext` tool.

  *platform* and *env* are accepted for API compatibility but unused:
  the same four tools apply everywhere.
  """
  tools = ('xgettext', 'msginit', 'msgmerge', 'msgfmt')
  return list(tools)
#############################################################################
| mit |
maeltac/hazanet | sense.py | 1 | 1881 |
import pdb
"""
Each sensor that uses this will follow these rules:
calling sensor.startup() function will initialize and calibrate the sensor. It will return 'Green' on success, 'Red' on failure
calling sensor.read() will return a float for that tick
calling sensor.reset() will attempt to reset the sensor, returning 0 for success, 1 for failure, or 2 for wait
"""
class Sensor():
    """Generic sensor front-end.

    Protocol (see module docstring): startup() initializes/calibrates and
    returns a status string, read() returns the reading for the current
    tick, reset() returns 0 (success), 1 (failure) or 2 (wait).
    """

    def startup(self, sentype):
        """Dispatch initialization to the handler class for *sentype*."""
        # Lambdas defer the name lookups: the concrete sensor classes are
        # defined later in this module.
        dispatch = {
            'RAD': lambda: RAD.startup(self, sentype),
            'CO': lambda: CO.startup(self, sentype),
            'CH4': lambda: CH4.startup(self, sentype),
            'C6H6': lambda: C6H6.startup(self, sentype),
            'C3H8': lambda: C3H8.startup(self, sentype),
        }
        try:
            handler = dispatch[sentype]
        except KeyError:
            return 'Error Initializing'
        return handler()

    def read(self):
        """Default reading for the current tick."""
        return 0

    def reset(self):
        """Default reset result: 0 means success."""
        return 0

    def supported(self):
        """List of sensor type codes this module knows how to drive."""
        return ['RAD', 'CO', 'CH4', 'C6H6', 'C3H8']
class RAD(Sensor):
    """Radiation sensor stub."""

    def startup(self, sentype):
        # Calibration placeholder; just report the status string.
        status = 'Sickly Green'
        return status

    def read(self):
        return 0

    def reset(self):
        return 0
class CO(Sensor):
    """Carbon-monoxide sensor stub."""

    def startup(self, sentype):
        # Calibration placeholder; just report the status string.
        status = 'Blue'
        return status

    def read(self):
        return 0

    def reset(self):
        return 0
class CH4(Sensor):
    """Methane sensor stub."""

    def startup(self, sentype):
        # Calibration placeholder; just report the status string.
        status = 'Nausious'
        return status

    def read(self):
        return 0

    def reset(self):
        return 0
class C6H6(Sensor):
    """Benzene sensor stub."""

    def startup(self, sentype):
        # Calibration placeholder; just report the status string.
        status = 'Toxic'
        return status

    def read(self):
        return 0

    def reset(self):
        return 0
class C3H8(Sensor):
    """Propane sensor stub."""

    def startup(self, sentype):
        # Calibration placeholder; just report the status string.
        status = 'On Fire'
        return status

    def read(self):
        return 0

    def reset(self):
        return 0
| apache-2.0 |
flavio-fernandes/networking-odl | networking_odl/db/migration/alembic_migrations/env.py | 2 | 2841 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from logging import config as logging_config
from alembic import context
from oslo_config import cfg
from oslo_db.sqlalchemy import session
import sqlalchemy as sa
from sqlalchemy import event
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration.models import head # noqa
from neutron.db import model_base
MYSQL_ENGINE = None
ODL_VERSION_TABLE = 'odl_alembic_version'
config = context.config
neutron_config = config.neutron_config
logging_config.fileConfig(config.config_file_name)
target_metadata = model_base.BASEV2.metadata
def set_mysql_engine():
    """Resolve which MySQL storage engine newly created tables should use."""
    global MYSQL_ENGINE
    # An explicit --mysql-engine CLI override wins; otherwise fall back
    # to the engine declared on the neutron model base class.
    try:
        override = neutron_config.command.mysql_engine
    except cfg.NoSuchOptError:
        override = None
    MYSQL_ENGINE = override or model_base.BASEV2.__table_args__['mysql_engine']
def include_object(object, name, type_, reflected, compare_to):
    """Alembic ``include_object`` hook.

    Excludes tables owned by other projects (listed in ``external.TABLES``)
    from migration comparison; every other object is included.
    (The ``object`` parameter name shadows the builtin but is kept for
    signature compatibility with Alembic's hook contract.)
    """
    # Only table objects can be external; all other object types pass.
    return not (type_ == 'table' and name in external.TABLES)
def run_migrations_offline():
    """Run migrations in 'offline' mode: emit SQL without a live DB
    connection, configured from the neutron database settings."""
    set_mysql_engine()
    kwargs = dict()
    if neutron_config.database.connection:
        kwargs['url'] = neutron_config.database.connection
    else:
        # No URL available; configure by dialect name only.
        kwargs['dialect_name'] = neutron_config.database.engine
    kwargs['include_object'] = include_object
    kwargs['version_table'] = ODL_VERSION_TABLE
    context.configure(**kwargs)
    with context.begin_transaction():
        context.run_migrations()
@event.listens_for(sa.Table, 'after_parent_attach')
def set_storage_engine(target, parent):
    """SQLAlchemy event hook: stamp every table attached to metadata with
    the configured MySQL storage engine (no-op on other backends)."""
    if MYSQL_ENGINE:
        target.kwargs['mysql_engine'] = MYSQL_ENGINE
def run_migrations_online():
    """Run migrations in 'online' mode against a live database connection,
    always disposing of the engine when done."""
    set_mysql_engine()
    engine = session.create_engine(neutron_config.database.connection)
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_object=include_object,
        version_table=ODL_VERSION_TABLE
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Release the connection/engine even if a migration fails.
        connection.close()
        engine.dispose()
# Entry point: alembic imports this module and the mode is decided by the
# invocation (e.g. --sql for offline), so dispatch at import time.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| apache-2.0 |
daisymax/nvda | source/appModules/totalcmd.py | 2 | 1422 | #appModules/totalcmd.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2012 NVDA Contributors
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
from NVDAObjects.IAccessible import IAccessible
import speech
import controlTypes
oldActivePannel=0
class AppModule(appModuleHandler.AppModule):
	def chooseNVDAObjectOverlayClasses(self, obj, clsList):
		# Overlay TCList onto Total Commander's file-panel list boxes so
		# NVDA can announce panel side and selection state.
		if obj.windowClassName in ("TMyListBox", "TMyListBox.UnicodeClass"):
			clsList.insert(0, TCList)
class TCList(IAccessible):
	def event_gainFocus(self):
		# When focus moves to the other file panel, announce which side
		# it is before the normal focus reporting.
		global oldActivePannel
		if oldActivePannel !=self.windowControlID:
			oldActivePannel=self.windowControlID
			# Walk up to the child of the main TTOTAL_CMD window...
			obj=self
			while obj and obj.parent and obj.parent.windowClassName!="TTOTAL_CMD":
				obj=obj.parent
			# ...then count preceding sibling panels to infer left/right.
			counter=0
			while obj and obj.previous and obj.windowClassName!="TPanel":
				obj=obj.previous
				if obj.windowClassName!="TDrivePanel":
					counter+=1
			if counter==2:
				speech.speakMessage(_("left"))
			else:
				speech.speakMessage(_("right"))
		super(TCList,self).event_gainFocus()
	def reportFocus(self):
		# Speak selection state plus just the file name (strip the path).
		if self.name:
			speakList=[]
			if controlTypes.STATE_SELECTED in self.states:
				speakList.append(controlTypes.stateLabels[controlTypes.STATE_SELECTED])
			speakList.append(self.name.split("\\")[-1])
			speech.speakMessage(" ".join(speakList))
		else:
			super(TCList,self).reportFocus()
| gpl-2.0 |
athulkrishnan/google-python-class | basic/solution/wordcount.py | 211 | 3529 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
#### LAB(begin solution)
def word_count_dict(filename):
  """Return a word/count dict for this filename.

  Reads the file line by line, lowercases every whitespace-separated word
  (so 'The' and 'the' count as one word) and maps it to its occurrence
  count. Utility shared by print_words() and print_top().
  """
  word_count = {}  # Map each word to its count
  input_file = open(filename, 'r')
  try:
    for line in input_file:
      for word in line.split():
        word = word.lower()
        # dict.get() replaces the original if/else first-time special case.
        word_count[word] = word_count.get(word, 0) + 1
  finally:
    # Close the file even if reading raises (original closed only on success).
    input_file.close()
  return word_count
def print_words(filename):
  """Prints one per line '<word> <count>' sorted by word for the given file."""
  word_count = word_count_dict(filename)
  # sorted() on the keys gives the required alphabetical order
  # (punctuation sorts before letters, which is acceptable here).
  words = sorted(word_count.keys())
  for word in words:
    print word, word_count[word]
def get_count(word_count_tuple):
  """Returns the count from a dict word/count tuple -- used for custom sort."""
  word, count = word_count_tuple
  return count
def print_top(filename):
  """Prints the top count listing for the given file."""
  word_count = word_count_dict(filename)
  # Each item is a (word, count) tuple.
  # Sort them so the big counts are first using key=get_count() to extract count.
  items = sorted(word_count.items(), key=get_count, reverse=True)
  # Print the first 20
  for item in items[:20]:
    print item[0], item[1]
##### LAB(end solution)
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
  """Parse command line args and dispatch to print_words()/print_top()."""
  # Exactly two arguments are required: the option flag and the filename.
  if len(sys.argv) != 3:
    print 'usage: ./wordcount.py {--count | --topcount} file'
    sys.exit(1)

  option = sys.argv[1]
  filename = sys.argv[2]
  if option == '--count':
    print_words(filename)
  elif option == '--topcount':
    print_top(filename)
  else:
    print 'unknown option: ' + option
    sys.exit(1)
main()
| apache-2.0 |
hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/ZopeInterface/zope/interface/tests/test_element.py | 2 | 1361 | ##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Element meta-class.
"""
import unittest
from zope.interface.interface import Element
class TestElement(unittest.TestCase):

    def test_taggedValues(self):
        """Tagged values set on distinct elements stay independent."""
        foo_element = Element("foo")
        bar_element = Element("bar")
        foo_element.setTaggedValue("x", 1)
        bar_element.setTaggedValue("x", 2)
        self.assertEqual(foo_element.getTaggedValue("x"), 1)
        self.assertEqual(bar_element.getTaggedValue("x"), 2)
def test_suite():
    """Assemble and return the suite of tests defined in this module."""
    return unittest.TestSuite((unittest.makeSuite(TestElement),))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| bsd-3-clause |
flaithbheartaigh/appuifw2 | python/appuifw2.py | 3 | 42271 | #
# appuifw2.py - enhanced appuifw
#
import e32
from appuifw import * # we want to enhance this
# import the C++ module
if e32.s60_version_info >= (3, 0):
import imp
_appuifw2 = imp.load_dynamic('_appuifw2', 'c:\\sys\\bin\\_appuifw2.pyd')
del imp
else:
import _appuifw2
# version
version = '1.00.0'
# NOTE(review): the components stay strings, so version_info is
# ('1', '00', '0') -- unlike e32.s60_version_info which holds ints.
# Confirm before comparing the two with relational operators.
version_info = tuple(version.split('.'))
# easy way of doing an async call
def schedule(target, *args, **kwargs):
    """Invoke target(*args, **kwargs) asynchronously via a zero-length sleep."""
    def invoke():
        target(*args, **kwargs)
    e32.ao_sleep(0, invoke)
# common item class used in Listbox2 and Menu
class Item(object):
    """Attribute bag used as the element type of Listbox2 and Menu.

    Reads and writes of non-underscore attributes are reported to
    registered observers (e.g. the containing Listbox2), which lets the
    container refresh computed attributes on read and mirror changes into
    the native UI on write. Observers are held by weak reference so an
    item never keeps its container alive.
    """
    def __init__(self, title, **kwargs):
        kwargs['title'] = title
        # Direct __dict__ update bypasses __setattr__ (no observers yet).
        self.__dict__.update(kwargs)
        self.__observers = []
    def add_observer(self, observer):
        from weakref import ref
        # Weakref callback prunes the entry when the observer dies.
        if ref(observer) not in self.__observers:
            self.__observers.append(ref(observer, self.__del_observer))
    def remove_observer(self, observer):
        from weakref import ref
        self.__del_observer(ref(observer))
    def __del_observer(self, ref):
        try:
            self.__observers.remove(ref)
        except ValueError:
            pass
    def __getattribute__(self, name):
        # Let observers refresh computed attributes (e.g. 'current',
        # 'marked') before the stored value is returned.
        if not name.startswith('_'):
            for obref in self.__observers:
                ob = obref()
                if hasattr(ob, 'handle_item_getattr'):
                    ob.handle_item_getattr(self, name)
        return object.__getattribute__(self, name)
    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)
        # Notify observers after the write so they see the new value.
        if not name.startswith('_'):
            for obref in self.__observers:
                ob = obref()
                if hasattr(ob, 'handle_item_setattr'):
                    ob.handle_item_setattr(self, name)
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, repr(self.title))
# Listbox2 UI control
class Listbox2(list):
    """Enhanced listbox UI control.

    A Python list of Item objects mirrored into a native Symbian listbox
    through _appuifw2: every list mutation updates the on-screen control,
    and relevant Item attribute changes ('title', 'subtitle', 'icon',
    'marked', 'current') are propagated both ways via the Item observer
    protocol.
    """
    def __init__(self, items=(), select_callback=None, double=False, icons=False, markable=False):
        # Native listbox mode: 0 single-line, 1 double-line, 2 single+icon,
        # 3 double+icon. Flag 0x4001 makes the list markable (multi-select).
        if double:
            if icons:
                mode = 3
            else:
                mode = 1
        else:
            if icons:
                mode = 2
            else:
                mode = 0
        if markable:
            flags = 0x4001
        else:
            flags = 0
        self.__double = double
        self.__icons = icons
        self.__markable = markable
        list.__init__(self, items)
        self._uicontrolapi = _appuifw2.Listbox2_create(mode, flags, select_callback)
        for item in self:
            self.__item_check(item)
            self.__ui_insert(-1, item)
            item.add_observer(self)
        self.__update_level = 0
        self.__update_mode = 0
    def __ui_insert(self, pos, item):
        # Push one item into the native control at pos (-1 = append).
        if self.__double:
            s = u'%s\t%s' % (item.title, getattr(item, 'subtitle', u''))
        else:
            s = item.title
        if self.__icons:
            try:
                i = item.icon
            except AttributeError:
                raise TypeError('this listbox requires icons')
        else:
            i = None
        api = self._uicontrolapi
        self.begin_update()
        try:
            self.__update_init(1)
            pos = _appuifw2.Listbox2_insert(api, pos, s, i)
            if self.__markable:
                # Shift native selection marks down to follow the items.
                for i in xrange(len(self)-1, pos, -1):
                    _appuifw2.Listbox2_select(api, i,
                        _appuifw2.Listbox2_select(api, i-1))
                _appuifw2.Listbox2_select(api, pos,
                    getattr(item, 'marked', False))
        finally:
            self.end_update()
    def __ui_delete(self, pos, count=1):
        # Remove `count` items from the native control starting at pos.
        api = self._uicontrolapi
        self.begin_update()
        try:
            self.__update_init(2)
            if self.__markable:
                # Shift native selection marks up over the removed range.
                for i in xrange(pos+count, len(self)):
                    _appuifw2.Listbox2_select(api, i,
                        _appuifw2.Listbox2_select(api, i+count))
            _appuifw2.Listbox2_delete(api, pos, count)
        finally:
            self.end_update()
    def __item_check(self, item):
        if not isinstance(item, Item):
            raise TypeError('items must be Item class instances')
    def handle_item_getattr(self, item, name):
        # Item observer hook: refresh computed attributes before a read.
        try:
            pos = self.index(item)
        except ValueError:
            return
        if name == 'current':
            item.__dict__[name] = (self.current() == pos)
        elif name == 'marked':
            item.__dict__[name] = _appuifw2.Listbox2_select(self._uicontrolapi, pos)
    def handle_item_setattr(self, item, name):
        # Item observer hook: mirror attribute writes into the native control.
        try:
            pos = self.index(item)
        except ValueError:
            return
        if name == 'current':
            if item.__dict__[name]:
                self.set_current(pos)
            else:
                item.__dict__[name] = (self.current() == pos)
        elif name == 'marked':
            self.begin_update()
            try:
                _appuifw2.Listbox2_select(self._uicontrolapi, pos, item.__dict__[name])
            finally:
                self.end_update()
        elif name in ('title', 'subtitle', 'icon'):
            self.__setitem__(pos, item)
    def begin_update(self):
        """Start a batch; nested calls are balanced by end_update()."""
        self.__update_level += 1
    def end_update(self):
        """End a batch; flushes pending native inserts/deletes at level 0."""
        if self.__update_level == 0:
            return
        self.__update_level -= 1
        if self.__update_level == 0:
            self.__update_process()
            self.__update_mode = 0
        app.refresh()
    def __update_init(self, mode):
        # Batch native inserts (mode 1) / deletes (mode 2); flush on switch.
        if mode != self.__update_mode:
            self.__update_process()
            self.__update_mode = mode
    def __update_process(self):
        if self.__update_mode == 1:
            _appuifw2.Listbox2_finish_insert(self._uicontrolapi)
        elif self.__update_mode == 2:
            _appuifw2.Listbox2_finish_delete(self._uicontrolapi)
    def clear(self):
        """Remove all items."""
        del self[:]
    def append(self, item):
        self.__item_check(item)
        self.__ui_insert(-1, item)
        item.add_observer(self)
        list.append(self, item)
    def extend(self, lst):
        # Materialize first: `lst` may be a one-shot iterator and is needed
        # twice (UI inserts + list.extend). Validate all items up front so a
        # bad item cannot leave the UI and the Python list out of sync
        # (the original interleaved checking with UI insertion).
        lst = list(lst)
        for item in lst:
            self.__item_check(item)
        self.begin_update()
        try:
            for item in lst:
                self.__ui_insert(-1, item)
                item.add_observer(self)
            list.extend(self, lst)
        finally:
            self.end_update()
    def insert(self, pos, item):
        self.__item_check(item)
        # Normalize pos to the slot the item will actually occupy so the
        # native control stays aligned with the Python list; the original
        # mapped every negative pos to UI index 0, desynchronizing the two.
        size = len(self)
        if pos < 0:
            pos = max(0, pos + size)
        elif pos > size:
            pos = size
        list.insert(self, pos, item)
        self.__ui_insert(pos if pos < size else -1, item)
        item.add_observer(self)
    def remove(self, item):
        pos = list.index(self, item)
        list.remove(self, item)
        self.__ui_delete(pos)
        item.remove_observer(self)
    def pop(self, pos=-1):
        item = list.pop(self, pos)
        if pos < 0:
            pos = len(self)+pos+1
        elif pos >= len(self):
            pos = -1
        self.__ui_delete(pos)
        item.remove_observer(self)
        return item
    def __defcmpfunc(item1, item2):
        # Default order: case-insensitive by title plus optional 'text'.
        s1 = (u'%s%s' % (item1.title, getattr(item1, 'text', u''))).lower()
        s2 = (u'%s%s' % (item2.title, getattr(item2, 'text', u''))).lower()
        return -(s1 < s2)
    def sort(self, cmpfunc=__defcmpfunc):
        list.sort(self, cmpfunc)
        self.begin_update()
        try:
            # Rebuild the native control in the new order.
            self.__ui_delete(0, len(self))
            for item in self:
                self.__ui_insert(-1, item)
        finally:
            self.end_update()
    def reverse(self):
        list.reverse(self)
        self.begin_update()
        try:
            # BUG FIX: the original called the nonexistent self.__ui.delete
            # (AttributeError); rebuild the native control like sort() does.
            self.__ui_delete(0, len(self))
            for item in self:
                self.__ui_insert(-1, item)
        finally:
            self.end_update()
    def current(self):
        """Return the index of the highlighted item; IndexError if none."""
        pos = _appuifw2.Listbox2_current(self._uicontrolapi)
        if pos is None:
            raise IndexError('no item selected')
        return pos
    def set_current(self, pos):
        if pos < 0:
            pos += len(self)
        self.begin_update()
        try:
            _appuifw2.Listbox2_current(self._uicontrolapi, pos)
        finally:
            self.end_update()
    def current_item(self):
        return self[self.current()]
    def top(self):
        """Return the index of the first visible item."""
        if not len(self):
            raise IndexError('list is empty')
        return _appuifw2.Listbox2_top(self._uicontrolapi)
    def set_top(self, pos):
        if pos < 0:
            pos += len(self)
        if not (0 <= pos < len(self)):
            raise IndexError('index out of range')
        self.begin_update()
        try:
            _appuifw2.Listbox2_top(self._uicontrolapi, pos)
        finally:
            self.end_update()
    def top_item(self):
        return self[self.top()]
    def bottom(self):
        """Return the index of the last visible item."""
        if not len(self):
            raise IndexError('list is empty')
        return _appuifw2.Listbox2_bottom(self._uicontrolapi)
    def bottom_item(self):
        return self[self.bottom()]
    def make_visible(self, pos):
        """Scroll so that the item at pos is on screen."""
        if pos < 0:
            pos += len(self)
        if not (0 <= pos < len(self)):
            raise IndexError('index out of range')
        self.begin_update()
        try:
            _appuifw2.Listbox2_make_visible(self._uicontrolapi, pos)
        finally:
            self.end_update()
    def bind(self, event_code, callback):
        _appuifw2.bind(self._uicontrolapi, event_code, callback)
    def marked(self):
        """Return the indices of all marked items."""
        return _appuifw2.Listbox2_selection(self._uicontrolapi)
    def marked_items(self):
        """Return the marked items themselves."""
        # BUG FIX: the original called the nonexistent self.selected().
        return [self[x] for x in self.marked()]
    def clear_marked(self):
        _appuifw2.Listbox2_clear_selection(self._uicontrolapi)
    def empty_list_text(self):
        return _appuifw2.Listbox2_empty_text(self._uicontrolapi)
    def set_empty_list_text(self, text):
        self.begin_update()
        try:
            _appuifw2.Listbox2_empty_text(self._uicontrolapi, text)
        finally:
            self.end_update()
    if e32.s60_version_info >= (3, 0):
        def highlight_rect(self):
            return _appuifw2.Listbox2_highlight_rect(self._uicontrolapi)
    def __setitem__(self, pos, item):
        olditem = self[pos]
        self.__item_check(item)
        list.__setitem__(self, pos, item)
        olditem.remove_observer(self)
        if pos < 0:
            pos = len(self)+pos
        self.begin_update()
        try:
            self.__ui_delete(pos)
            self.__ui_insert(pos, item)
        finally:
            self.end_update()
        item.add_observer(self)
    def __delitem__(self, pos):
        item = self[pos]
        list.__delitem__(self, pos)
        item.remove_observer(self)
        if pos < 0:
            pos = len(self)+pos
        self.__ui_delete(pos)
    def __setslice__(self, i, j, items):
        for item in items:
            self.__item_check(item)
        olditems = self[i:j]
        list.__setslice__(self, i, j, items)
        for item in olditems:
            item.remove_observer(self)
        ln = len(self)
        i = min(ln, max(0, i))
        j = min(ln, max(i, j))
        self.begin_update()
        try:
            self.__ui_delete(i, j-i)
            for pos in xrange(i, i+len(items)):
                self.__ui_insert(pos, self[pos])
                # BUG FIX: the original never registered this listbox as an
                # observer of the newly assigned items.
                self[pos].add_observer(self)
        finally:
            self.end_update()
    def __delslice__(self, i, j):
        items = self[i:j]
        size = len(self)
        list.__delslice__(self, i, j)
        for item in items:
            item.remove_observer(self)
        i = min(size, max(0, i))
        j = min(size, max(i, j))
        self.__ui_delete(i, j-i)
    def __repr__(self):
        return '<%s instance at 0x%08X; %d items>' % (self.__class__.__name__, id(self), len(self))
# emulation of appuifw.Listbox using _appuifw2.Listbox2_xxx functions;
# the only difference is the scrollbar in this implementation
class Listbox(object):
    """Drop-in emulation of appuifw.Listbox built on _appuifw2.Listbox2_*.

    Same interface as the original control; per the module comment above,
    the only visible difference is the scrollbar.
    """
    def __init__(self, items, select_callback=None):
        # check items and extract the mode
        self.__set_items(items, just_check=True)
        self._uicontrolapi = _appuifw2.Listbox2_create(self.__mode, 0, select_callback)
        # now set the items
        self.__set_items(items)
    def __set_items(self, items, just_check=False):
        # Derive the native mode from the first item's shape:
        #   unicode           -> 0 (single line)
        #   (unicode,unicode) -> 1 (double line)
        #   (unicode,icon)    -> 2 (single line + icon)
        #   (u, u, icon)      -> 3 (double line + icon)
        # With just_check=True only validate and remember the mode.
        if not isinstance(items, list):
            raise TypeError('argument 1 must be a list')
        if not items:
            raise ValueError('non-empty list expected')
        item = items[0]
        mode = 0
        if isinstance(item, tuple):
            if len(item) == 2:
                if isinstance(item[1], unicode):
                    mode = 1
                else:
                    mode = 2
            elif len(item) == 3:
                mode = 3
            else:
                raise ValueError('tuple must include 2 or 3 elements')
        if just_check:
            self.__mode = mode
        else:
            # The native control cannot switch modes after creation.
            if mode != self.__mode:
                raise ValueError('changing of listbox type not permitted')
            api = self._uicontrolapi
            _appuifw2.Listbox2_delete(api)
            #_appuifw2.Listbox2_finish_delete(api)
            if mode == 0:
                for item in items:
                    _appuifw2.Listbox2_insert(api, -1, item)
            elif mode == 1:
                for item in items:
                    _appuifw2.Listbox2_insert(api, -1, u'%s\t%s' % (item[0], item[1]))
            elif mode == 2:
                for item in items:
                    _appuifw2.Listbox2_insert(api, -1, item[0], item[1])
            else:
                for item in items:
                    _appuifw2.Listbox2_insert(api, -1, u'%s\t%s' % (item[0], item[1]), item[2])
            _appuifw2.Listbox2_finish_insert(api)
            app.refresh()
    def bind(self, event_code, callback):
        _appuifw2.bind(self._uicontrolapi, event_code, callback)
    def current(self):
        # Listbox2_current() returns a valid index since we always have items in the list
        return _appuifw2.Listbox2_current(self._uicontrolapi)
    def set_list(self, items, current=0):
        # Replace the whole list and clamp `current` into range, batching
        # screen refreshes around the two native operations.
        app.begin_refresh()
        try:
            self.__set_items(items)
            current = min(len(items)-1, max(0, current))
            _appuifw2.Listbox2_current(self._uicontrolapi, current)
        finally:
            app.end_refresh()
    if e32.s60_version_info >= (3, 0):
        # size/position of the highlight rectangle -- only available on
        # S60 3rd edition and later.
        def __get_size(self):
            return _appuifw2.Listbox2_highlight_rect(self._uicontrolapi)[2:]
        size = property(__get_size)
        def __get_position(self):
            return _appuifw2.Listbox2_highlight_rect(self._uicontrolapi)[:2]
        position = property(__get_position)
# enhanced appuifw.Text UI control,
# provides the same interface with more features
class Text(object):
    """Enhanced text editor control; interface-compatible superset of
    appuifw.Text, implemented on the _appuifw2.Text2_* native API.

    Supports slicing/indexing like a mutable string, selection handling,
    undo, clipboard operations, input-mode control and cursor movement.
    """
    def __init__(self, text=u'', move_callback=None, edit_callback=None,
            skinned=False, scrollbar=False, word_wrap=True, t9=True,
            indicator=True, fixed_case=False, flags=0x00009108, editor_flags=0):
        # default flags are:
        # EAllowUndo|EAlwaysShowSelection|EInclusiveSizeFixed|ENoAutoSelection
        # flags are defined in eikedwin.h
        if not word_wrap:
            flags |= 0x00000020 # ENoWrap
        self._uicontrolapi = _appuifw2.Text2_create(flags, scrollbar, skinned,
            move_callback, edit_callback)
        if text:
            self.set(text)
            self.set_pos(0)
        if not t9:
            editor_flags |= 0x002 # EFlagNoT9
        if not indicator:
            editor_flags |= 0x004 # EFlagNoEditIndicators
        if fixed_case:
            editor_flags |= 0x001 # EFlagFixedCase
        # editor flags are defined in eikon.hrh
        if editor_flags:
            _appuifw2.Text2_set_editor_flags(self._uicontrolapi, editor_flags)
    def add(self, text):
        # Insert text at the current cursor position.
        _appuifw2.Text2_add_text(self._uicontrolapi, text)
    def insert(self, pos, text):
        _appuifw2.Text2_insert_text(self._uicontrolapi, pos, text)
    def bind(self, event_code, callback):
        _appuifw2.bind(self._uicontrolapi, event_code, callback)
    def clear(self):
        # NOTE: shadowed by the exec-generated lambda of the same name in
        # the "methods without arguments" loop below (same native call).
        _appuifw2.Text2_clear_text(self._uicontrolapi)
    def delete(self, pos=0, length=-1):
        # length == -1 means "to the end of the text" (native convention).
        _appuifw2.Text2_delete_text(self._uicontrolapi, pos, length)
    def apply(self, pos=0, length=-1):
        # Apply the current font/color/style settings to the given range.
        _appuifw2.Text2_apply(self._uicontrolapi, pos, length)
    def get_pos(self):
        return _appuifw2.Text2_get_pos(self._uicontrolapi)
    def set_pos(self, cursor_pos, select=False):
        _appuifw2.Text2_set_pos(self._uicontrolapi, cursor_pos, select)
    # deprecated, use len(self) instead, kept for compatibility
    def len(self):
        return _appuifw2.Text2_text_length(self._uicontrolapi)
    def get(self, pos=0, length=-1):
        return _appuifw2.Text2_get_text(self._uicontrolapi, pos, length)
    def set(self, text):
        _appuifw2.Text2_set_text(self._uicontrolapi, text)
    def __len__(self):
        return _appuifw2.Text2_text_length(self._uicontrolapi)
    def __getitem__(self, i):
        return _appuifw2.Text2_get_text(self._uicontrolapi, i, 1)
    def __setitem__(self, i, value):
        # Replace len(value) characters at i with `value`.
        _appuifw2.Text2_delete_text(self._uicontrolapi, i, len(value))
        _appuifw2.Text2_insert_text(self._uicontrolapi, i, value)
    def __delitem__(self, i):
        _appuifw2.Text2_delete_text(self._uicontrolapi, i, 1)
    def __getslice__(self, i, j):
        # Clamp indices to the valid range, like built-in sequence slicing.
        ln = len(self)
        i = min(ln, max(0, i))
        j = min(ln, max(i, j))
        return _appuifw2.Text2_get_text(self._uicontrolapi, i, j-i)
    def __setslice__(self, i, j, value):
        ln = len(self)
        i = min(ln, max(0, i))
        j = min(ln, max(i, j))
        _appuifw2.Text2_delete_text(self._uicontrolapi, i, j-i)
        _appuifw2.Text2_insert_text(self._uicontrolapi, i, value)
    def __delslice__(self, i, j):
        ln = len(self)
        i = min(ln, max(0, i))
        j = min(ln, max(i, j))
        return _appuifw2.Text2_delete_text(self._uicontrolapi, i, j-i)
    def get_selection(self):
        # Returns (cursor_pos, anchor_pos, selected_text).
        pos, anchor = _appuifw2.Text2_get_selection(self._uicontrolapi)
        i = min(pos, anchor)
        j = max(pos, anchor)
        return (pos, anchor, _appuifw2.Text2_get_text(self._uicontrolapi, i, j-i))
    def set_selection(self, pos, anchor):
        _appuifw2.Text2_set_selection(self._uicontrolapi, pos, anchor)
    def set_word_wrap(self, word_wrap):
        _appuifw2.Text2_set_word_wrap(self._uicontrolapi, word_wrap)
    def set_limit(self, limit):
        _appuifw2.Text2_set_limit(self._uicontrolapi, limit)
    def get_word_info(self, pos=-1):
        return _appuifw2.Text2_get_word_info(self._uicontrolapi, pos)
    def set_case(self, case):
        # `case` is one of the E*Case flags defined below this class.
        _appuifw2.Text2_set_case(self._uicontrolapi, case)
    def set_allowed_cases(self, cases):
        _appuifw2.Text2_set_allowed_cases(self._uicontrolapi, cases)
    def set_input_mode(self, mode):
        # `mode` is one of the E*InputMode flags defined below this class.
        _appuifw2.Text2_set_input_mode(self._uicontrolapi, mode)
    def set_allowed_input_modes(self, modes):
        _appuifw2.Text2_set_allowed_input_modes(self._uicontrolapi, modes)
    def set_undo_buffer(self, pos=0, length=-1):
        return _appuifw2.Text2_set_undo_buffer(self._uicontrolapi, pos, length)
    def move(self, direction, select=False):
        # `direction` is one of the EF* movement flags defined below.
        _appuifw2.Text2_move(self._uicontrolapi, direction, select)
    def move_display(self, direction):
        _appuifw2.Text2_move_display(self._uicontrolapi, direction)
    def xy2pos(self, coords):
        return _appuifw2.Text2_xy2pos(self._uicontrolapi, coords)
    def pos2xy(self, pos):
        return _appuifw2.Text2_pos2xy(self._uicontrolapi, pos)
    # properties
    # Generated getter/setter pairs delegating to Text2_get_X/Text2_set_X.
    for name in ('color', 'focus', 'font', 'highlight_color', 'style', 'read_only',
            'has_changed', 'allow_undo', 'indicator_text'):
        exec '%s = property(lambda self: _appuifw2.Text2_get_%s(self._uicontrolapi),' \
            'lambda self, value: _appuifw2.Text2_set_%s(self._uicontrolapi, value))' % \
            (name, name, name)
    # methods without arguments
    # Generated no-argument methods delegating to Text2_X.
    for name in ('clear', 'select_all', 'clear_selection', 'undo', 'clear_undo',
            'can_undo', 'can_cut', 'cut', 'can_copy', 'copy', 'can_paste', 'paste'):
        exec '%s = lambda self: _appuifw2.Text2_%s(self._uicontrolapi)' % \
            (name, name)
    del name
# Text_display UI control, shows text in read-only mode without
# cursor and handles scrolling
class Text_display(Text):
    """Read-only Text variant: no cursor, no indicator; the arrow keys
    scroll the view by page (default) or by line."""
    def __init__(self, text=u'', skinned=False, scrollbar=False,
            scroll_by_line=False):
        Text.__init__(self, text, skinned=skinned, scrollbar=scrollbar,
            indicator=False, flags=0x0400B908, editor_flags=0x008)
        # flags are same as defined in Text, plus:
        # EDisplayOnly|EReadOnly|EAvkonDisableCursor
        # editor flags are:
        # EFlagNoLRNavigation
        from key_codes import EKeyUpArrow, EKeyDownArrow
        # EFLineUp/EFPageUp etc. are module-level constants defined below
        # this class; the lambdas resolve them at call time.
        if scroll_by_line:
            self.bind(EKeyUpArrow, lambda: self.move_display(EFLineUp))
            self.bind(EKeyDownArrow, lambda: self.move_display(EFLineDown))
        else:
            self.bind(EKeyUpArrow, lambda: self.move_display(EFPageUp))
            self.bind(EKeyDownArrow, lambda: self.move_display(EFPageDown))
# Flag values below mirror the corresponding Symbian SDK enums referenced
# in the Text class (eikon.hrh / eikedwin.h families).
# case mode flags for Text.set_case and Text.set_allowed_cases
EUpperCase = 1
ELowerCase = 2
ETextCase = 4
EAllCases = EUpperCase | ELowerCase | ETextCase
# input mode flags for Text.set_input_mode and Text.set_allowed_input_modes
ENullInputMode = 0x0
# All text input modes that are available in current language.
ETextInputMode = 0x1
ENumericInputMode = 0x2
ESecretAlphaInputMode = 0x4
# Japanese input modes - only effective in Japanese variant.
EKatakanaInputMode = 0x8 # half-width Katakana
EFullWidthTextInputMode = 0x10 # full-width latin alphabet
EFullWidthNumericInputMode = 0x20 # full-width numeric (0-9)
EFullWidthKatakanaInputMode = 0x40 # full-width Katakana
EHiraganaKanjiInputMode = 0x80 # Hiragana/Kanji
EHiraganaInputMode = 0x100 # only Hiragana
EHalfWidthTextInputMode = 0x200 # half-width Latin alphabet
EAllInputModes = ETextInputMode | ENumericInputMode | ESecretAlphaInputMode | \
    EKatakanaInputMode | EFullWidthTextInputMode | EFullWidthNumericInputMode | \
    EFullWidthKatakanaInputMode | EHiraganaKanjiInputMode | EHalfWidthTextInputMode
# direction flags for Text.move and Text.move_display
EFNoMovement = 0
EFLeft = 1
EFRight = 2
EFLineUp = 3
EFLineDown = 4
EFPageUp = 5
EFPageDown = 6
EFLineBeg = 7
EFLineEnd = 8
# Menu class, easy handling of menus
class Menu(list):
    """A titled list of Item objects forming a (possibly nested) menu.

    Submenus are Items carrying a `submenu` attribute (another Menu); leaf
    items may carry `callback`, `hidden`, `dimmed`, `checked` and `flags`.
    """
    def __init__(self, title=u'', items=()):
        if title:
            self.title = title
        else:
            self.title = u''
        list.__init__(self, items)
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, repr(self.title))
    def __defcompare(a, b):
        # Default (Python 2 cmp-style) comparison: case-insensitive by title.
        return -(a.title.lower() < b.title.lower())
    def sort(self, compare=__defcompare):
        list.sort(self, compare)
    def find(self, **kwargs):
        """Return a tuple of items whose attributes match all keyword args."""
        items = []
        for item in self:
            for name, val in kwargs.items():
                if not hasattr(item, name) or getattr(item, name) != val:
                    break
            else:
                items.append(item)
        return tuple(items)
    def popup(self, full_screen=False, search_field=False):
        """Show the menu (popup or full-screen list), descending into
        submenus; return the chosen leaf item or None if cancelled."""
        menu = self
        while True:
            items = [x for x in menu if not getattr(x, 'hidden', False)]
            titles = [x.title for x in items]
            if full_screen:
                # Temporarily show the menu title in the application title.
                if menu.title:
                    title = app.title
                    app.title = menu.title
                i = selection_list(titles, search_field)
                if menu.title:
                    app.title = title
            elif menu:
                i = popup_menu(titles, menu.title)
            else:
                i = None
            if i is None or i < 0:
                item = None
                break
            item = items[i]
            try:
                menu = item.submenu
            except AttributeError:
                break
        return item
    def multi_selection(self, style='checkbox', search_field=False):
        """Show a multi-selection list of the visible items; return the
        list of chosen items."""
        items = [x for x in self if not getattr(x, 'hidden', False)]
        titles = [x.title for x in items]
        # BUG FIX: the original referenced an undefined global `menu` here.
        if self.title:
            title = app.title
            app.title = self.title
        r = multi_selection_list(titles, style, search_field)
        if self.title:
            app.title = title
        return [items[x] for x in r]
    def as_fw_menu(self):
        """Convert to the nested (title, callback[, flags]) tuple structure
        expected by appuifw.app.menu."""
        menu = []
        for item in self:
            if getattr(item, 'hidden', False):
                continue
            try:
                second = item.submenu.as_fw_menu()
            except AttributeError:
                second = getattr(item, 'callback', lambda: None)
            flags = getattr(item, 'flags', 0)
            if getattr(item, 'dimmed', False):
                flags |= 0x1 # EEikMenuItemDimmed
            if getattr(item, 'checked', False):
                flags |= 0x88 # EEikMenuItemCheckBox|EEikMenuItemSymbolOn
            if flags:
                menu.append((item.title, second, flags))
            else:
                menu.append((item.title, second))
        return menu
    def copy(self):
        """Return a copy of the menu; items are re-created with all their
        attributes and submenus are copied recursively."""
        items = []
        for item in self:
            # BUG FIX: the original did Item(item.__dict__), which made the
            # attribute dict itself the new item's title and lost all other
            # attributes.
            new_item = Item(**item.__dict__)
            try:
                new_item.submenu = new_item.submenu.copy()
            except AttributeError:
                pass
            items.append(new_item)
        return Menu(self.title, items)
# View class, easy handling of application views
class View(object):
    """Stackable application view: a saved set of UI attributes (body,
    menu, title, softkeys, navi pane, tabs) that Application pushes onto
    and pops off the screen via `app.view = some_view`."""
    # Attributes synchronized with the Application object when the view
    # becomes (in)active; also used to generate the properties below.
    all_attributes = ('body', 'exit_key_handler', 'menu', 'screen', 'title',
        'init_menu_handler', 'menu_key_handler', 'menu_key_text', 'exit_key_text',
        'navi_text', 'left_navi_arrow', 'right_navi_arrow')
    def __init__(self):
        self.__body = None
        self.__exit_key_handler = self.close
        self.__menu = None
        self.__screen = 'normal'
        self.__title = unicode(self.__class__.__name__)
        self.__init_menu_handler = self.init_menu
        self.__menu_key_handler = self.handle_menu_key
        # Inherit softkey labels from the saved application state (the
        # bottom of the view stack) if views are already active.
        if app.view is not None:
            self.__menu_key_text = app._Application__views[0].menu_key_text
            self.__exit_key_text = app._Application__views[0].exit_key_text
        else:
            self.__menu_key_text = app.menu_key_text
            self.__exit_key_text = app.exit_key_text
        self.__navi_text = u''
        self.__left_navi_arrow = False
        self.__right_navi_arrow = False
        self.__tabs = ([], None)
        self.__tab_index = 0
        self.__lock = None
    def shown(self):
        # called when this view is shown
        pass
    def hidden(self):
        # called when other view is shown over this view
        pass
    def close(self):
        # closes the view and removes it from the app;
        # this is the default Exit key handler
        app._Application__pop_view(self)
        if self.__lock is not None:
            self.__lock.signal()
    def wait_for_close(self):
        # blocks until self.close() is called
        if self not in app._Application__views:
            raise AssertionError('View not opened')
        self.__lock = e32.Ao_lock()
        self.__lock.wait()
        self.__lock = None
    def init_menu(self):
        # called shortly before the menu is shown;
        # may use self.menu to modify the menu
        pass
    def handle_menu_key(self):
        # default Menu key handler; use only if the view doesn't have
        # a menu (self.menu is None), if it has a dynamic menu, override
        # self.init_menu() instead
        pass
    def set_tabs(self, tab_texts, callback):
        # Apply immediately only when this view is the active one.
        if app.view is self:
            app.set_tabs(tab_texts, callback)
        self.__tabs = (tab_texts, callback)
        self.__tab_index = 0
    def activate_tab(self, index):
        if app.view is self:
            app.activate_tab(index)
        self.__tab_index = index
    # create the properties
    # Each attribute gets a property that stores locally and forwards to
    # the Application object while this view is active.
    for name in all_attributes:
        exec 'def __get_%s(self):\n return self._View__%s\n' % (name, name)
        exec 'def __set_%s(self, value):\n self._View__%s = value\n if app.view is self:\n  app.%s = value\n' % \
            (name, name, name)
        exec '%s = property(__get_%s, __set_%s)' % (name, name, name)
    del name
# Application class, the only instance of this class will be the 'app'
class Application(object):
# original app
from appuifw import app as __app
# inherit methods (properties are implemented in getattr/setattr)
for name in dir(__app):
exec '%s = _Application__app.%s' % (name, name)
del name
def __init__(self):
# app is the only instance of this class; during the instantiation app
# is still appuifw.app so we just have to check its type
global app
if isinstance(app, self.__class__):
raise TypeError('%s already instantiated' % self.__class__.__name__)
self.__tabs = ([], None)
self.__tab_index = 0
self.__menu = None
self.__menu_id = 0
self.__menu_key_handler = None
self.__init_menu_handler = None
self.__navi_text = u''
self.__left_navi_arrow = False
self.__right_navi_arrow = False
self.__navi = None
# creates a menu init callback and stores a reference to it
# (callback is removed if this reference dies)
# this call also activates the flags for the menu which
# makes checkmarks possible
self.__menu_dyn_init_callback = \
_appuifw2.patch_menu_dyn_init_callback(self.__dyn_init_menu)
self.__refresh_level = 0
self.__refresh_pending = False
self.__views = []
def begin_refresh(self):
self.__refresh_level += 1
def end_refresh(self):
self.__refresh_level -= 1
if self.__refresh_level <= 0:
self.__refresh_level = 0
if self.__refresh_pending:
_appuifw2.refresh()
self.__refresh_pending = False
def refresh(self):
if self.__refresh_level == 0:
_appuifw2.refresh()
else:
self.__refresh_pending = True
def set_tabs(self, tab_texts, callback):
self.__app.set_tabs(tab_texts, callback)
self.__tabs = (tab_texts, callback)
self.__tab_index = 0
def activate_tab(self, index):
self.__app.activate_tab(index)
self.__tab_index = index
def __get_body(self):
return self.__app.body
def __set_body(self, value):
self.__app.body = value
def __get_exit_key_handler(self):
return self.__app.exit_key_handler
def __set_exit_key_handler(self, value):
self.__app.exit_key_handler = value
def __get_menu(self):
if id(self.__app.menu) != self.__menu_id:
return self.__app.menu
return self.__menu
def __set_menu(self, value):
self.__menu = value
self.__update_menu()
def __dyn_init_menu(self):
if self.__menu_key_handler is not None:
schedule(self.__menu_key_handler)
if self.__init_menu_handler is not None:
self.__init_menu_handler()
if id(self.__app.menu) == self.__menu_id:
self.__update_menu()
def __update_menu(self):
if hasattr(self.__menu, 'as_fw_menu'): # Menu()
self.__app.menu = self.__menu.as_fw_menu()
elif self.__menu is None:
self.__app.menu = []
else:
self.__app.menu = self.__menu # a list of tuples
self.__menu_id = id(self.__app.menu)
def __get_screen(self):
return self.__app.screen
def __set_screen(self, value):
self.__app.screen = value
def __get_title(self):
return self.__app.title
def __set_title(self, value):
self.__app.title = value
def __get_focus(self):
return self.__app.focus
def __set_focus(self, value):
self.__app.focus = value
if e32.s60_version_info >= (3, 0):
def __get_orientation(self):
return self.__app.orientation
def __set_orientation(self, value):
self.__app.orientation = value
def __get_init_menu_handler(self):
return self.__init_menu_handler
def __set_init_menu_handler(self, value):
self.__init_menu_handler = value
def __get_menu_key_handler(self):
return self.__menu_key_handler
def __set_menu_key_handler(self, value):
self.__menu_key_handler = value
def __get_menu_key_text(self):
# EAknSoftkeyOptions
return _appuifw2.command_text(3000)
def __set_menu_key_text(self, value):
# EAknSoftkeyOptions
_appuifw2.command_text(3000, value)
def __get_exit_key_text(self):
# EAknSoftkeyExit
return _appuifw2.command_text(3009)
def __set_exit_key_text(self, value):
# EAknSoftkeyExit
_appuifw2.command_text(3009, value)
def __get_navi_text(self):
return self.__navi_text
def __set_navi_text(self, value):
self.__navi_text = value
self.__set_navi()
def __get_left_navi_arrow(self):
return self.__left_navi_arrow
def __set_left_navi_arrow(self, value):
self.__left_navi_arrow = bool(value)
self.__set_navi()
def __get_right_navi_arrow(self):
return self.__right_navi_arrow
def __set_right_navi_arrow(self, value):
self.__right_navi_arrow = bool(value)
self.__set_navi()
def __set_navi(self):
if self.__navi_text or self.__left_navi_arrow or \
self.__right_navi_arrow:
self.__navi = _appuifw2.set_navi(self.__navi_text,
self.__left_navi_arrow, self.__right_navi_arrow)
else:
self.__navi = None
# Note. The self.__navi is a navi pane indicator reference. The
# indicator is removed if this reference dies.
def __get_view(self):
try:
return self.__views[-1]
except IndexError:
return None
    def __set_view(self, value):
        # Push *value* onto the view stack and make it visible.  On the
        # first push, the current application state is first captured into
        # a synthetic "app view" so it can be restored later.
        if not isinstance(value, View):
            raise TypeError('expected a View object')
        if not self.__views:
            # no views yet, store the app state in a new view
            appview = View()
            for name in View.all_attributes:
                setattr(appview, name, getattr(self, name))
            appview.set_tabs(*self.__tabs)
            appview.activate_tab(self.__tab_index)
            try:
                self.__views.append(appview)
                appview.shown()
            except:
                # roll back so the stack stays empty on failure
                del self.__views[0]
                raise
        try:
            # show the new view
            self.__views.append(value)
            self.__sync_view()
            # hide the old view
            self.__views[-2].hidden()
            value.shown()
        except:
            # error, remove the new view
            del self.__views[-1]
            if len(self.__views) == 1:
                # remove the app view
                del self.__views[0]
            raise
    def __pop_view(self, view=None):
        # Remove *view* (or, by default, the topmost view) from the stack,
        # re-sync the display if the visible view changed, and drop the
        # synthetic app view once it is the only entry left.
        if view is None:
            i = -1
        else:
            try:
                i = self.__views.index(view)
            except ValueError:
                # not on the stack; nothing to do
                return
        curr = self.view
        try:
            self.__views.pop(i)
        except IndexError:
            return
        try:
            if self.view != curr:
                self.view.shown()
                self.__sync_view()
                curr.hidden()
        finally:
            if len(self.__views) == 1:
                # remove the app view
                del self.__views[0]
    def __sync_view(self):
        # Copy the topmost view's attributes and tab configuration onto
        # the application, making that view the visible state.
        try:
            view = self.__views[-1]
        except IndexError:
            return
        for name in View.all_attributes:
            setattr(self, name, getattr(view, name))
        # Access View's private (name-mangled) tab state directly.
        self.set_tabs(*view._View__tabs)
        self.activate_tab(view._View__tab_index)
    # Class-level property wiring for the private accessors above.
    # original properties
    body = property(__get_body, __set_body)
    exit_key_handler = property(__get_exit_key_handler, __set_exit_key_handler)
    menu = property(__get_menu, __set_menu)
    screen = property(__get_screen, __set_screen)
    title = property(__get_title, __set_title)
    focus = property(__get_focus, __set_focus)
    if e32.s60_version_info >= (3, 0):
        # Mirrors the conditional definition of the orientation accessors.
        orientation = property(__get_orientation, __set_orientation)
    # new properties
    init_menu_handler = property(__get_init_menu_handler, __set_init_menu_handler)
    menu_key_handler = property(__get_menu_key_handler, __set_menu_key_handler)
    menu_key_text = property(__get_menu_key_text, __set_menu_key_text)
    exit_key_text = property(__get_exit_key_text, __set_exit_key_text)
    navi_text = property(__get_navi_text, __set_navi_text)
    left_navi_arrow = property(__get_left_navi_arrow, __set_left_navi_arrow)
    right_navi_arrow = property(__get_right_navi_arrow, __set_right_navi_arrow)
    view = property(__get_view, __set_view)
# Module-level singleton mirroring appuifw's own 'app' object.
app = Application()

def get_skin_color(color_id):
    # color_id is a 3-tuple unpacked into the native helper; presumably
    # (skin UID, color table, index) -- confirm against _appuifw2.
    return _appuifw2.get_skin_color(*color_id)

# Color IDs.
EMainAreaTextColor = (0x10005A26, 0x3300, 5)

get_language = _appuifw2.get_language
# Languages.
ELangTest = 0 # Value used for testing - does not represent a language.
ELangEnglish = 1 # UK English.
ELangFrench = 2 # French.
ELangGerman = 3 # German.
ELangSpanish = 4 # Spanish.
ELangItalian = 5 # Italian.
ELangSwedish = 6 # Swedish.
ELangDanish = 7 # Danish.
ELangNorwegian = 8 # Norwegian.
ELangFinnish = 9 # Finnish.
ELangAmerican = 10 # American.
ELangSwissFrench = 11 # Swiss French.
ELangSwissGerman = 12 # Swiss German.
ELangPortuguese = 13 # Portuguese.
ELangTurkish = 14 # Turkish.
ELangIcelandic = 15 # Icelandic.
ELangRussian = 16 # Russian.
ELangHungarian = 17 # Hungarian.
ELangDutch = 18 # Dutch.
ELangBelgianFlemish = 19 # Belgian Flemish.
ELangAustralian = 20 # Australian English.
ELangBelgianFrench = 21 # Belgian French.
ELangAustrian = 22 # Austrian German.
ELangNewZealand = 23 # New Zealand English.
ELangInternationalFrench = 24 # International French.
ELangCzech = 25 # Czech.
ELangSlovak = 26 # Slovak.
ELangPolish = 27 # Polish.
ELangSlovenian = 28 # Slovenian.
ELangTaiwanChinese = 29 # Taiwanese Chinese.
ELangHongKongChinese = 30 # Hong Kong Chinese.
ELangPrcChinese = 31 # Peoples Republic of China's Chinese.
ELangJapanese = 32 # Japanese.
ELangThai = 33 # Thai.
ELangAfrikaans = 34 # Afrikaans.
ELangAlbanian = 35 # Albanian.
ELangAmharic = 36 # Amharic.
ELangArabic = 37 # Arabic.
ELangArmenian = 38 # Armenian.
ELangTagalog = 39 # Tagalog.
ELangBelarussian = 40 # Belarussian.
ELangBengali = 41 # Bengali.
ELangBulgarian = 42 # Bulgarian.
ELangBurmese = 43 # Burmese.
ELangCatalan = 44 # Catalan.
ELangCroatian = 45 # Croatian.
ELangCanadianEnglish = 46 # Canadian English.
ELangInternationalEnglish = 47 # International English.
ELangSouthAfricanEnglish = 48 # South African English.
ELangEstonian = 49 # Estonian.
ELangFarsi = 50 # Farsi.
ELangCanadianFrench = 51 # Canadian French.
ELangScotsGaelic = 52 # Gaelic.
ELangGeorgian = 53 # Georgian.
ELangGreek = 54 # Greek.
ELangCyprusGreek = 55 # Cyprus Greek.
ELangGujarati = 56 # Gujarati.
ELangHebrew = 57 # Hebrew.
ELangHindi = 58 # Hindi.
ELangIndonesian = 59 # Indonesian.
ELangIrish = 60 # Irish.
ELangSwissItalian = 61 # Swiss Italian.
ELangKannada = 62 # Kannada.
ELangKazakh = 63 # Kazakh.
ELangKhmer = 64 # Khmer.
ELangKorean = 65 # Korean.
ELangLao = 66 # Lao.
ELangLatvian = 67 # Latvian.
ELangLithuanian = 68 # Lithuanian.
ELangMacedonian = 69 # Macedonian.
ELangMalay = 70 # Malay.
ELangMalayalam = 71 # Malayalam.
ELangMarathi = 72 # Marathi.
ELangMoldavian = 73 # Moldovian.
ELangMongolian = 74 # Mongolian.
ELangNorwegianNynorsk = 75 # Norwegian Nynorsk.
ELangBrazilianPortuguese = 76 # Brazilian Portuguese.
ELangPunjabi = 77 # Punjabi.
ELangRomanian = 78 # Romanian.
ELangSerbian = 79 # Serbian.
ELangSinhalese = 80 # Sinhalese.
ELangSomali = 81 # Somali.
ELangInternationalSpanish = 82 # International Spanish.
ELangLatinAmericanSpanish = 83 # American Spanish.
ELangSwahili = 84 # Swahili.
ELangFinlandSwedish = 85 # Finland Swedish.
ELangReserved1 = 86 # Reserved for future use.
ELangTamil = 87 # Tamil.
ELangTelugu = 88 # Telugu.
ELangTibetan = 89 # Tibetan.
ELangTigrinya = 90 # Tigrinya.
ELangCyprusTurkish = 91 # Cyprus Turkish.
ELangTurkmen = 92 # Turkmen.
ELangUkrainian = 93 # Ukrainian.
ELangUrdu = 94 # Urdu.
ELangReserved2 = 95 # Reserved for future use.
ELangVietnamese = 96 # Vietnamese.
ELangWelsh = 97 # Welsh.
ELangZulu = 98 # Zulu.
ELangOther = 99 # Use of this value is deprecated.
ELangNone = 0xFFFF # Indicates the final language in the language downgrade path.
ELangMaximum = ELangNone # This must always be equal to the last (largest) value.
# query() with additional 'ok' and 'cancel' args (softkey labels)
def query(label, type, initial_value=None, ok=None, cancel=None):
    # Wraps appuifw.query, optionally relabeling the OK (-2) / Cancel (-1)
    # softkeys.  The relabeling is done asynchronously via schedule();
    # the 'abort' closure cell tells a late-running callback to do nothing
    # once the dialog has already finished.
    # NOTE(review): the parameter 'type' shadows the builtin; kept for
    # API compatibility with appuifw.query.
    if ok is not None or cancel is not None:
        def set_ok_cancel(ok, cancel):
            if not abort:
                if ok is not None:
                    try:
                        _appuifw2.command_text(-2, ok)
                    except SymbianError:
                        # Best effort: relabeling may fail on some dialogs.
                        pass
                if cancel is not None:
                    try:
                        _appuifw2.command_text(-1, cancel)
                    except SymbianError:
                        pass
        abort = False
        schedule(set_ok_cancel, ok, cancel)
    from appuifw import query
    try:
        return query(label, type, initial_value)
    finally:
        # if the set_ok_cancel() wasn't called yet, this will abort it
        abort = True
| apache-2.0 |
bosstb/HaberPush | youtube_dl/extractor/fourtube.py | 27 | 4585 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
sanitized_Request,
str_to_int,
)
class FourTubeIE(InfoExtractor):
    """youtube-dl extractor for 4tube.com video pages.

    Scrapes metadata from the HTML page, then requests per-quality
    download tokens from tkn.4tube.com to build the format list.
    """
    IE_NAME = '4tube'
    _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
        'md5': '6516c8ac63b03de06bc8eac14362db4f',
        'info_dict': {
            'id': '209733',
            'ext': 'mp4',
            'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
            'uploader': 'WCP Club',
            'uploader_id': 'wcp-club',
            'upload_date': '20131031',
            'timestamp': 1383263892,
            'duration': 583,
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Basic metadata from <meta> tags and uploader links.
        title = self._html_search_meta('name', webpage)
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage))
        thumbnail = self._html_search_meta('thumbnailUrl', webpage)
        uploader_id = self._html_search_regex(
            r'<a class="item-to-subscribe" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
            webpage, 'uploader id', fatal=False)
        uploader = self._html_search_regex(
            r'<a class="item-to-subscribe" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
            webpage, 'uploader', fatal=False)
        categories_html = self._search_regex(
            r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="[^"]*?list[^"]*?">(.*?)</ul>',
            webpage, 'categories', fatal=False)
        categories = None
        if categories_html:
            categories = [
                c.strip() for c in re.findall(
                    r'(?s)<li><a.*?>(.*?)</a>', categories_html)]
        view_count = str_to_int(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([0-9,]+)">',
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserLikes:([0-9,]+)">',
            webpage, 'like count', fatal=False))
        duration = parse_duration(self._html_search_meta('duration', webpage))
        # Media id and available quality labels; primary source is the
        # quality <button> elements on the page.
        media_id = self._search_regex(
            r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage,
            'media id', default=None, group='id')
        sources = [
            quality
            for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)]
        if not (media_id and sources):
            # Fallback: pull the parameters out of the embedded player JS.
            player_js = self._download_webpage(
                self._search_regex(
                    r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2',
                    webpage, 'player JS', group='url'),
                video_id, 'Downloading player JS')
            params_js = self._search_regex(
                r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
                player_js, 'initialization parameters')
            params = self._parse_json('[%s]' % params_js, video_id)
            media_id = params[0]
            sources = ['%s' % p for p in params[2]]
        # One token request covers all requested qualities ('+'-joined).
        token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
            media_id, '+'.join(sources))
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
            b'Origin': b'http://www.4tube.com',
        }
        token_req = sanitized_Request(token_url, b'{}', headers)
        tokens = self._download_json(token_req, video_id)
        formats = [{
            'url': tokens[format]['token'],
            'format_id': format + 'p',
            'resolution': format + 'p',
            'quality': int(format),
        } for format in sources]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'categories': categories,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'timestamp': timestamp,
            'like_count': like_count,
            'view_count': view_count,
            'duration': duration,
            'age_limit': 18,
        }
| mit |
ev3-ota/hrp2 | modules/btstack/platforms/arduino/docs/update_listings.py | 3 | 11638 | #!/usr/bin/env python
import getopt
import re
import sys
# Defines the names of example groups. Preserves the order in which the example groups will be parsed.
list_of_groups = ["iBeacon", "ANCS", "LE Central", "LE Peripheral"]
# Defines which examples belong to a group. Example is defined as [example file, example title].
list_of_examples = {
"iBeacon": [["iBeacon"], ["iBeaconScanner"]],
"ANCS": [["ANCS"]],
"LE Central": [["LECentral"]],
"LE Peripheral": [["LEPeripheral"]],
}
lst_header = """
"""
lst_ending = """
"""
examples_header = """
"""
example_item = """
- [EXAMPLE_TITLE](#section:EXAMPLE_LABEL): EXAMPLE_DESC.
"""
example_section = """
## EXAMPLE_TITLE: EXAMPLE_DESC
<a name="section:EXAMPLE_LABEL"></a>
"""
example_subsection = """
### SECTION_TITLE
"""
listing_start = """
<a name="FILE_NAME:LISTING_LABEL"></a>
<!-- -->
"""
listing_ending = """
"""
def replacePlaceholder(template, title, lable):
    """Fill the API_TITLE and API_LABEL placeholders in *template*."""
    filled = template.replace("API_TITLE", title)
    return filled.replace("API_LABEL", lable)
def latexText(text, ref_prefix):
    """Rewrite manual cross-references in *text* into markdown links.

    'Listing <name>' becomes '[code snippet below](#<ref_prefix>:<name>)'
    and 'Section <name>' becomes '[here](#section:<name>)'.  The phrase
    ' in the BTstack manual' is stripped.  Returns '' for empty input.

    Regex literals are raw strings; the original non-raw '\\s'/'\\w'
    escapes trigger invalid-escape warnings on Python 3.6+.
    """
    if not text:
        return ""
    brief = text.replace(" in the BTstack manual", "")
    refs = re.match(r'.*(Listing\s+)(\w+).*', brief)
    if refs:
        brief = brief.replace(refs.group(1), "[code snippet below]")
        brief = brief.replace(refs.group(2), "(#" + ref_prefix + ":" + refs.group(2) + ")")
    refs = re.match(r'.*(Section\s+)(\w+).*', brief)
    if refs:
        brief = brief.replace(refs.group(1), "[here]")
        brief = brief.replace(refs.group(2), "(#section:" + refs.group(2) + ")")
    return brief
def isEmptyCommentLine(line):
    """Match a C-style comment continuation line with no text (' * ').

    Raw-string regex; the non-raw original warns on Python 3.6+.
    """
    return re.match(r'(\s*\*\s*)\n', line)
def isCommentLine(line):
    """Match any C-style comment continuation line starting with '*'.

    Raw-string regex; the non-raw original warns on Python 3.6+.
    """
    return re.match(r'(\s*\*\s*).*', line)
def isEndOfComment(line):
    """Match a line that closes a C-style comment ('*/').

    Raw-string regex; the non-raw original warns on Python 3.6+.
    """
    return re.match(r'\s*\*/.*', line)
def isNewItem(line):
    """Match a comment line starting a bullet item (' * - ...').

    Raw-string regex; the non-raw original warns on Python 3.6+.
    """
    return re.match(r'(\s*\*\s*\-\s*)(.*)', line)
def isTextTag(line):
    """Match a comment line carrying the '@text' documentation tag.

    Raw-string regex; the non-raw original is kept for consistency with
    the other predicates.
    """
    return re.match(r'.*(@text).*', line)
def isItemizeTag(line):
    """Match an indented comment bullet of the form ' * - ...'.

    Requires leading whitespace (\\s+), unlike isNewItem.  Raw-string
    regex; the non-raw original warns on Python 3.6+.
    """
    return re.match(r"(\s+\*\s+)(-\s)(.*)", line)
def processTextLine(line, ref_prefix):
    """Extract the documentation text from one comment line.

    '@text' lines and plain comment lines yield their text (via
    latexText); bullet lines yield a markdown list item.  Returns ''
    when the line carries no text.  Regexes are raw strings to avoid
    invalid-escape warnings on Python 3.6+.
    """
    if isTextTag(line):
        text_line_parts = re.match(r".*(@text)(.*)", line)
        return " " + latexText(text_line_parts.group(2), ref_prefix)
    if isItemizeTag(line):
        text_line_parts = re.match(r"(\s*\*\s*\-\s*)(.*)", line)
        return "\n- " + latexText(text_line_parts.group(2), ref_prefix)
    text_line_parts = re.match(r"(\s+\*\s+)(.*)", line)
    if text_line_parts:
        return " " + latexText(text_line_parts.group(2), ref_prefix)
    return ""
def getExampleTitle(example_path):
    # Scan an example source file for its EXAMPLE_START marker and return
    # the title text, with underscores escaped for the markdown output.
    # NOTE(review): opens in 'rb' but matches with str regexes -- works on
    # Python 2 only; confirm before porting to Python 3.
    example_title = ''
    with open(example_path, 'rb') as fin:
        for line in fin:
            parts = re.match('.*(EXAMPLE_START)\((.*)\):\s*(.*)(\*/)?\n',line)
            if parts:
                example_title = parts.group(3).replace("_","\_")
                continue
    return example_title
class State:
    # Parser states for writeListings()'s line-by-line scan of an example.
    SearchExampleStart = 0
    SearchListingStart = 1
    SearchListingPause = 2
    SearchListingResume = 3
    SearchListingEnd = 4
    SearchItemizeEnd = 5
    ReachedExampleEnd = 6
# Module-level accumulators shared by the parser helpers below.
text_block = ''
itemize_block = ''
def writeTextBlock(aout, lstStarted):
    # Flush the accumulated prose unless a code listing is currently open
    # (lstStarted truthy); always leaves the buffer state consistent.
    global text_block
    if text_block and not lstStarted:
        aout.write(text_block)
        text_block = ''
def writeItemizeBlock(aout, lstStarted):
    # Flush the accumulated bullet list (followed by a blank line) unless
    # a code listing is currently open.
    global itemize_block
    if itemize_block and not lstStarted:
        aout.write(itemize_block + "\n\n")
        itemize_block = ''
def writeListings(aout, infile_name, ref_prefix):
    """Parse one example source file and emit its documentation.

    State machine driven by the EXAMPLE_START/LISTING_START/LISTING_PAUSE/
    LISTING_RESUME/LISTING_END/EXAMPLE_END markers embedded in the example's
    comments.  Prose from comments is buffered in the module-level
    text_block/itemize_block and interleaved with the code listings.
    """
    global text_block, itemize_block
    itemText = None
    state = State.SearchExampleStart
    code_in_listing = ""
    # NOTE(review): the string literals below look collapsed by whitespace
    # mangling in this copy -- confirm the original indentation widths.
    code_identation = " "
    skip_code = 0
    with open(infile_name, 'rb') as fin:
        for line in fin:
            if state == State.SearchExampleStart:
                parts = re.match('.*(EXAMPLE_START)\((.*)\):\s*(.*)(\*/)?\n',line)
                if parts:
                    lable = parts.group(2).replace("_","")
                    title = latexText(parts.group(2), ref_prefix)
                    desc = latexText(parts.group(3), ref_prefix)
                    aout.write(example_section.replace("EXAMPLE_TITLE", title).replace("EXAMPLE_DESC", desc).replace("EXAMPLE_LABEL", lable))
                    state = State.SearchListingStart
                continue
            # detect @section
            section_parts = re.match('.*(@section)\s*(.*)(:?\s*.?)\*?/?\n',line)
            if section_parts:
                aout.write("\n" + example_subsection.replace("SECTION_TITLE", section_parts.group(2)))
                continue
            # detect @subsection
            subsection_parts = re.match('.*(@section)\s*(.*)(:?\s*.?)\*?/?\n',line)
            if section_parts:
                subsubsection = example_subsection.replace("SECTION_TITLE", section_parts.group(2)).replace('section', 'subsection')
                aout.write("\n" + subsubsection)
                continue
            if isTextTag(line):
                text_block = text_block + "\n\n" + processTextLine(line, ref_prefix)
                continue
            skip_code = 0
            lstStarted = state != State.SearchListingStart
            if text_block or itemize_block:
                if isEndOfComment(line) or isEmptyCommentLine(line):
                    skip_code = 1
                    if itemize_block:
                        # finish itemize
                        writeItemizeBlock(aout, lstStarted)
                    else:
                        if isEmptyCommentLine(line):
                            text_block = text_block + "\n\n"
                        else:
                            writeTextBlock(aout, lstStarted)
                else:
                    if isNewItem(line) and not itemize_block:
                        skip_code = 1
                        # finish text, start itemize
                        writeTextBlock(aout, lstStarted)
                        itemize_block = "\n " + processTextLine(line, ref_prefix)
                        continue
                    if itemize_block:
                        skip_code = 1
                        itemize_block = itemize_block + processTextLine(line, ref_prefix)
                    elif isCommentLine(line):
                        # append text
                        skip_code = 1
                        text_block = text_block + processTextLine(line, ref_prefix)
                    else:
                        skip_code = 0
                #continue
            if state == State.SearchListingStart:
                parts = re.match('.*(LISTING_START)\((.*)\):\s*(.*)(\s+\*/).*',line)
                if parts:
                    lst_lable = parts.group(2).replace("_","")
                    lst_caption = latexText(parts.group(3), ref_prefix)
                    listing = listing_start.replace("LISTING_CAPTION", lst_caption).replace("FILE_NAME", ref_prefix).replace("LISTING_LABEL", lst_lable)
                    if listing:
                        aout.write("\n" + listing)
                    state = State.SearchListingEnd
                continue
            if state == State.SearchListingEnd:
                parts_end = re.match('.*(LISTING_END).*',line)
                parts_pause = re.match('.*(LISTING_PAUSE).*',line)
                end_comment_parts = re.match('.*(\*/)\s*\n', line);
                if parts_end:
                    aout.write(code_in_listing)
                    code_in_listing = ""
                    aout.write(listing_ending)
                    state = State.SearchListingStart
                    writeItemizeBlock(aout, 0)
                    writeTextBlock(aout, 0)
                elif parts_pause:
                    # elide the paused region with an ellipsis
                    code_in_listing = code_in_listing + code_identation + "...\n"
                    state = State.SearchListingResume
                elif not end_comment_parts:
                    # aout.write(line)
                    if not skip_code:
                        code_in_listing = code_in_listing + code_identation + line.replace(" ", " ")
                continue
            if state == State.SearchListingResume:
                parts = re.match('.*(LISTING_RESUME).*',line)
                if parts:
                    state = State.SearchListingEnd
                continue
            parts = re.match('.*(EXAMPLE_END).*',line)
            if parts:
                if state != State.SearchListingStart:
                    print "Formating error detected"
                writeItemizeBlock(aout, 0)
                writeTextBlock(aout, 0)
                state = State.ReachedExampleEnd
                print "Reached end of the example"
# write list of examples
# write list of examples
def processExamples(intro_file, examples_folder, examples_ofile):
    """Generate the examples markdown: intro, table of contents, listings.

    NOTE(review): uses dict.has_key and print statements elsewhere in this
    module -- the script is Python 2 only.
    """
    with open(examples_ofile, 'w') as aout:
        # 1. Copy the static introduction verbatim.
        with open(intro_file, 'rb') as fin:
            for line in fin:
                aout.write(line)
        # 2. Resolve each example's title from its source file.
        for group_title in list_of_groups:
            if not list_of_examples.has_key(group_title): continue
            examples = list_of_examples[group_title]
            for example in examples:
                example_path = examples_folder + example[0] + "/" + example[0] + ".ino"
                example_title = getExampleTitle(example_path)
                example.append(example_title)
        # 3. Emit the grouped table of contents.
        aout.write(examples_header)
        aout.write("\n\n");
        for group_title in list_of_groups:
            if not list_of_examples.has_key(group_title): continue
            examples = list_of_examples[group_title]
            group_title = group_title + " example"
            if len(examples) > 1:
                group_title = group_title + "s"
            group_title = group_title + ":"
            aout.write("- " + group_title + "\n");
            for example in examples:
                ref_prefix = example[0].replace("_", "")
                title = latexText(example[0], ref_prefix)
                desc = latexText(example[1], ref_prefix)
                aout.write(example_item.replace("EXAMPLE_TITLE", title).replace("EXAMPLE_DESC", desc).replace("EXAMPLE_LABEL", ref_prefix))
            aout.write("\n")
        aout.write("\n")
        # 4. Emit the annotated code listings for every example.
        for group_title in list_of_groups:
            if not list_of_examples.has_key(group_title): continue
            examples = list_of_examples[group_title]
            for example in examples:
                file_name = examples_folder + example[0] + "/" + example[0] + ".ino"
                writeListings(aout, file_name, example[0].replace("_",""))
def main(argv):
    # Command-line front end (Python 2: print statements, getopt).
    # Defaults assume the script runs from platforms/arduino/docs/.
    btstack_folder = "../../../"
    docs_folder = "docs/examples/"
    inputfolder = btstack_folder + "platforms/arduino/examples/"
    outputfile = docs_folder + "generated.md"
    intro_file = "docs/examples/intro.md"
    try:
        opts, args = getopt.getopt(argv,"hiso:",["ifolder=","ofile="])
    except getopt.GetoptError:
        print 'update_listings.py [-i <inputfolder>] [-o <outputfile>]'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'update_listings.py [-i <inputfolder>] [-s] [-o <outputfile>]'
            sys.exit()
        elif opt in ("-i", "--ifolder"):
            inputfolder = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    print 'Input folder is ', inputfolder
    print 'Output file is ', outputfile
    processExamples(intro_file, inputfolder, outputfile)

if __name__ == "__main__":
    main(sys.argv[1:])
| gpl-2.0 |
tony/kivy | kivy/storage/redisstore.py | 8 | 2845 | '''
Redis Store
===========
Store implementation using Redis. You must have redis-py installed.
Usage example::
from kivy.storage.redisstore import RedisStore
params = dict(host='localhost', port=6379, db=14)
store = RedisStore(params)
All the key-value pairs will be stored with a prefix 'store' by default.
You can instantiate the storage with another prefix like this::
from kivy.storage.redisstore import RedisStore
params = dict(host='localhost', port=6379, db=14)
store = RedisStore(params, prefix='mystore2')
The params dictionary will be passed to the redis.StrictRedis class.
See `redis-py <https://github.com/andymccurdy/redis-py>`_.
'''
__all__ = ('RedisStore', )
import os
from json import loads, dumps
from kivy.properties import StringProperty
from kivy.storage import AbstractStore
# don't import redis during the documentation generation
if 'KIVY_DOC' not in os.environ:
import redis
class RedisStore(AbstractStore):
    '''Store implementation using a Redis database.

    Entries are stored as Redis hashes under '<prefix>.d.<key>', with
    every field value JSON-serialized.  See the :mod:`kivy.storage`
    module documentation for more informations.
    '''

    # Key prefix for all entries written by this store.
    prefix = StringProperty('store')

    def __init__(self, redis_params, **kwargs):
        # redis_params is forwarded verbatim to redis.StrictRedis().
        self.redis_params = redis_params
        self.r = None
        super(RedisStore, self).__init__(**kwargs)

    def store_load(self):
        self.r = redis.StrictRedis(**self.redis_params)

    def store_sync(self):
        # Redis persists writes itself; nothing to flush.
        pass

    def store_exists(self, key):
        key = self.prefix + '.d.' + key
        value = self.r.exists(key)
        return value

    def store_get(self, key):
        key = self.prefix + '.d.' + key
        if not self.r.exists(key):
            raise KeyError(key)
        result = self.r.hgetall(key)
        # Decode each JSON-serialized field.  Iterate over a snapshot of
        # the keys so the in-place update is safe on Python 3, where
        # keys() is a live view (the original py2-style loop relied on
        # keys() returning a list).
        for k in list(result.keys()):
            result[k] = loads(result[k])
        return result

    def store_put(self, key, values):
        key = self.prefix + '.d.' + key
        pipe = self.r.pipeline()
        pipe.delete(key)
        # items() works on Python 2 and 3; the original iteritems() is
        # Python 2 only and raised AttributeError on py3.
        for k, v in values.items():
            pipe.hset(key, k, dumps(v))
        pipe.execute()
        return True

    def store_delete(self, key):
        key = self.prefix + '.d.' + key
        if not self.r.exists(key):
            raise KeyError(key)
        return self.r.delete(key)

    def store_keys(self):
        # Strip the '<prefix>.d.' namespace from every matching Redis key.
        z = len(self.prefix + '.d.')
        return [x[z:] for x in self.r.keys(self.prefix + '.d.*')]

    def store_find(self, filters):
        # Snapshot keys/values once so the two lists stay aligned and are
        # concrete lists on Python 3 as well.
        fkeys = list(filters.keys())
        fvalues = list(filters.values())
        for key in self.store_keys():
            skey = self.prefix + '.d.' + key
            svalues = self.r.hmget(skey, fkeys)
            if None in svalues:
                continue
            svalues = [loads(x) for x in svalues]
            if fvalues != svalues:
                continue
            yield key, self.r.hgetall(skey)
| mit |
cubledesarrollo/cubledotes | cuble/suit/config.py | 1 | 2073 | from django.contrib.admin import ModelAdmin
from django.conf import settings
from . import VERSION
def default_config():
    """Return the built-in Suit configuration used as a fallback when the
    project settings do not define SUIT_CONFIG (or omit a key)."""
    general = {
        'VERSION': VERSION,
        # configurable
        'ADMIN_NAME': 'Django Suit',
        'HEADER_DATE_FORMAT': 'l, jS F Y',
        'HEADER_TIME_FORMAT': 'H:i',
    }
    form = {
        'SHOW_REQUIRED_ASTERISK': True,
        'CONFIRM_UNSAVED_CHANGES': True,
    }
    menu = {
        'SEARCH_URL': '/admin/auth/user/',
        'MENU_OPEN_FIRST_CHILD': True,
        'MENU_ICONS': {
            'auth': 'icon-lock',
            'sites': 'icon-leaf',
        },
        # Other supported menu keys (examples):
        # 'MENU_EXCLUDE': ('auth.group',),
        # 'MENU': (
        #     'sites',
        #     {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
        #     {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
        #     {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
        # ),
    }
    misc = {
        'LIST_PER_PAGE': 20,
    }
    config = {}
    for section in (general, form, menu, misc):
        config.update(section)
    return config
def get_config(param=None):
    """Return the active Suit configuration, or one value from it.

    The project-level settings.SUIT_CONFIG takes precedence; when it is
    absent, default_config() is used.  With *param* given, a missing or
    None value falls back to the default for that key.
    """
    _missing = object()
    config = getattr(settings, 'SUIT_CONFIG', _missing)
    if config is _missing:
        config = default_config()
    if not param:
        return config
    value = config.get(param)
    if value is None:
        value = default_config().get(param)
    return value
# Module-import side effects: these mutate the global ModelAdmin class,
# so they apply to every ModelAdmin in the project once suit is imported.
# Reverse default actions position
ModelAdmin.actions_on_top = False
ModelAdmin.actions_on_bottom = True
# Set global list_per_page
ModelAdmin.list_per_page = get_config('LIST_PER_PAGE')
def setup_filer():
    # Patch django-filer's admin forms to use Suit's auto-sizing textarea
    # for the 'description' field.  Imports are local so filer stays an
    # optional dependency (this function is only called when filer is
    # installed -- see the commented-out hook below this function).
    from suit.widgets import AutosizedTextarea
    from filer.admin.imageadmin import ImageAdminForm
    from filer.admin.fileadmin import FileAdminChangeFrom
    def ensure_meta_widgets(meta_cls):
        # Guarantee the form Meta has a widgets dict before overriding.
        if not hasattr(meta_cls, 'widgets'):
            meta_cls.widgets = {}
        meta_cls.widgets['description'] = AutosizedTextarea
    ensure_meta_widgets(ImageAdminForm.Meta)
    ensure_meta_widgets(FileAdminChangeFrom.Meta)
# if 'filer' in settings.INSTALLED_APPS:
# setup_filer()
| mit |
towerjoo/mindsbook | django/contrib/sessions/tests.py | 16 | 11096 | import base64
from datetime import datetime, timedelta
import pickle
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.base import SessionBase
from django.contrib.sessions.models import Session
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import unittest
from django.utils.hashcompat import md5_constructor
class SessionTestsMixin(object):
    # This does not inherit from TestCase to avoid any tests being run with this
    # class, which wouldn't work, and to allow different TestCase subclasses to
    # be used.

    backend = None  # subclasses must specify

    def setUp(self):
        self.session = self.backend()

    def tearDown(self):
        # NB: be careful to delete any sessions created; stale sessions fill up
        # the /tmp (with some backends) and eventually overwhelm it after lots
        # of runs (think buildbots)
        self.session.delete()

    def test_new_session(self):
        self.assertFalse(self.session.modified)
        self.assertFalse(self.session.accessed)

    def test_get_empty(self):
        self.assertEqual(self.session.get('cat'), None)

    def test_store(self):
        self.session['cat'] = "dog"
        self.assertTrue(self.session.modified)
        self.assertEqual(self.session.pop('cat'), 'dog')

    def test_pop(self):
        self.session['some key'] = 'exists'
        # Reset the flags on the *session* (the original reset them on the
        # test case itself, which had no effect) so pop() is what marks the
        # session accessed/modified.
        self.session.accessed = False
        self.session.modified = False
        self.assertEqual(self.session.pop('some key'), 'exists')
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)
        self.assertEqual(self.session.get('some key'), None)

    def test_pop_default(self):
        self.assertEqual(self.session.pop('some key', 'does not exist'),
                         'does not exist')
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)

    def test_setdefault(self):
        self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)

    def test_update(self):
        self.session.update({'update key': 1})
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)
        self.assertEqual(self.session.get('update key', None), 1)

    def test_has_key(self):
        self.session['some key'] = 1
        self.session.modified = False
        self.session.accessed = False
        self.assertTrue(self.session.has_key('some key'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)

    def test_values(self):
        self.assertEqual(self.session.values(), [])
        self.assertTrue(self.session.accessed)
        self.session['some key'] = 1
        self.assertEqual(self.session.values(), [1])

    def test_iterkeys(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        i = self.session.iterkeys()
        self.assertTrue(hasattr(i, '__iter__'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
        self.assertEqual(list(i), ['x'])

    def test_itervalues(self):
        # Renamed from a second 'test_iterkeys' definition that shadowed the
        # real one, so neither this nor the keys test above actually ran.
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        i = self.session.itervalues()
        self.assertTrue(hasattr(i, '__iter__'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
        self.assertEqual(list(i), [1])

    def test_iteritems(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        i = self.session.iteritems()
        self.assertTrue(hasattr(i, '__iter__'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
        self.assertEqual(list(i), [('x', 1)])

    def test_clear(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        self.assertEqual(self.session.items(), [('x', 1)])
        self.session.clear()
        self.assertEqual(self.session.items(), [])
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)

    def test_save(self):
        self.session.save()
        self.assertTrue(self.session.exists(self.session.session_key))

    def test_delete(self):
        self.session.delete(self.session.session_key)
        self.assertFalse(self.session.exists(self.session.session_key))

    def test_flush(self):
        self.session['foo'] = 'bar'
        self.session.save()
        prev_key = self.session.session_key
        self.session.flush()
        self.assertFalse(self.session.exists(prev_key))
        self.assertNotEqual(self.session.session_key, prev_key)
        self.assertTrue(self.session.modified)
        self.assertTrue(self.session.accessed)

    def test_cycle(self):
        self.session['a'], self.session['b'] = 'c', 'd'
        self.session.save()
        prev_key = self.session.session_key
        prev_data = self.session.items()
        self.session.cycle_key()
        self.assertNotEqual(self.session.session_key, prev_key)
        self.assertEqual(self.session.items(), prev_data)

    def test_invalid_key(self):
        # Submitting an invalid session key (either by guessing, or if the db has
        # removed the key) results in a new key being generated.
        session = self.backend('1')
        session.save()
        self.assertNotEqual(session.session_key, '1')
        self.assertEqual(session.get('cat'), None)
        session.delete()

    # Custom session expiry

    def test_default_expiry(self):
        # A normal session has a max age equal to settings
        self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
        # So does a custom session with an idle expiration time of 0 (but it'll
        # expire at browser close)
        self.session.set_expiry(0)
        self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)

    def test_custom_expiry_seconds(self):
        # Using seconds
        self.session.set_expiry(10)
        delta = self.session.get_expiry_date() - datetime.now()
        self.assertTrue(delta.seconds in (9, 10))
        age = self.session.get_expiry_age()
        self.assertTrue(age in (9, 10))

    def test_custom_expiry_timedelta(self):
        # Using timedelta
        self.session.set_expiry(timedelta(seconds=10))
        delta = self.session.get_expiry_date() - datetime.now()
        self.assertTrue(delta.seconds in (9, 10))
        age = self.session.get_expiry_age()
        self.assertTrue(age in (9, 10))

    def test_custom_expiry_datetime(self):
        # Using a fixed datetime.  Renamed from a duplicated
        # 'test_custom_expiry_timedelta' definition that shadowed the real
        # timedelta test above, so neither variant actually ran.
        self.session.set_expiry(datetime.now() + timedelta(seconds=10))
        delta = self.session.get_expiry_date() - datetime.now()
        self.assertTrue(delta.seconds in (9, 10))
        age = self.session.get_expiry_age()
        self.assertTrue(age in (9, 10))

    def test_custom_expiry_reset(self):
        self.session.set_expiry(None)
        self.session.set_expiry(10)
        self.session.set_expiry(None)
        self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)

    def test_get_expire_at_browser_close(self):
        # Tests get_expire_at_browser_close with different settings and different
        # set_expiry calls.  try/finally restores the global setting; the
        # original's nested 'try/except: raise' added nothing.
        original_expire_at_browser_close = settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
        try:
            settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False
            self.session.set_expiry(10)
            self.assertFalse(self.session.get_expire_at_browser_close())
            self.session.set_expiry(0)
            self.assertTrue(self.session.get_expire_at_browser_close())
            self.session.set_expiry(None)
            self.assertFalse(self.session.get_expire_at_browser_close())
            settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
            self.session.set_expiry(10)
            self.assertFalse(self.session.get_expire_at_browser_close())
            self.session.set_expiry(0)
            self.assertTrue(self.session.get_expire_at_browser_close())
            self.session.set_expiry(None)
            self.assertTrue(self.session.get_expire_at_browser_close())
        finally:
            settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = original_expire_at_browser_close

    def test_decode(self):
        # Ensure we can decode what we encode
        data = {'a test key': 'a test value'}
        encoded = self.session.encode(data)
        self.assertEqual(self.session.decode(encoded), data)

    def test_decode_django12(self):
        # Ensure we can decode values encoded using Django 1.2
        # Hard code the Django 1.2 method here:
        def encode(session_dict):
            pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
            pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
            return base64.encodestring(pickled + pickled_md5)
        data = {'a test key': 'a test value'}
        encoded = encode(data)
        self.assertEqual(self.session.decode(encoded), data)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
    # Run the shared session test suite against the database backend.
    backend = DatabaseSession
class CacheDBSessionTests(SessionTestsMixin, TestCase):
    # Run the shared session test suite against the cached-db backend.
    backend = CacheDBSession
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
    # Run the shared session test suite against the file backend.
    backend = FileSession

    def setUp(self):
        super(FileSessionTests, self).setUp()
        # Do file session tests in an isolated directory, and kill it after we're done.
        self.original_session_file_path = settings.SESSION_FILE_PATH
        self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()

    def tearDown(self):
        # Restore the global setting before removing the temp store.
        settings.SESSION_FILE_PATH = self.original_session_file_path
        shutil.rmtree(self.temp_session_store)
        super(FileSessionTests, self).tearDown()

    def test_configuration_check(self):
        # Make sure the file backend checks for a good storage dir
        # (tearDown restores SESSION_FILE_PATH afterwards).
        settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
        self.assertRaises(ImproperlyConfigured, self.backend)
# Run the shared session test suite against the pure cache-backed store.
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
    backend = CacheSession
| bsd-3-clause |
xindaya/bazel | tools/build_defs/docker/sha256.py | 20 | 1050 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper to have a portable SHA-256 tool."""
# TODO(dmarting): instead of this tool we should make SHA-256 of artifacts
# available in Skylark.
import hashlib
import sys
if __name__ == "__main__":
  # Expect exactly two arguments: the input file and the output file.
  if len(sys.argv) != 3:
    print "Usage: %s input output" % sys.argv[0]
    sys.exit(-1)
  # Read the input as raw bytes and write its SHA-256 hex digest as text.
  with open(sys.argv[2], "w") as outputfile:
    with open(sys.argv[1], "rb") as inputfile:
      outputfile.write(hashlib.sha256(inputfile.read()).hexdigest())
| apache-2.0 |
openbaoz/titanium_mobile | support/common/simplejson/encoder.py | 343 | 16033 | """Implementation of JSONEncoder
"""
import re
# Use the optional C speedups extension when available; fall back to the
# pure-Python implementations below otherwise.
try:
    from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
    c_encode_basestring_ascii = None
try:
    from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
    c_make_encoder = None
# Characters that must be escaped inside a JSON string: control chars,
# backslash and double quote.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Same, plus every non-printable-ASCII character (for ensure_ascii output).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes with the high bit set, i.e. UTF-8 encoded non-ASCII data.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Map of characters to their short JSON escape sequences.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Fill in \uXXXX escapes for the remaining C0 control characters.
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
# Floats are serialized with repr().
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON representation of a Python string

    Wraps *s* in double quotes, replacing every character that needs
    escaping with its entry from ESCAPE_DCT.
    """
    return '"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + '"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string

    Non-ASCII characters are emitted as \\uXXXX escapes, using surrogate
    pairs for code points above the BMP.  Pure-Python fallback for the
    C speedups implementation.
    """
    # Byte strings holding UTF-8 data are decoded first so multi-byte
    # sequences escape as single code points rather than raw bytes.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C-accelerated escaper when the speedups extension loaded.
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).

    """
    # Default separators; overridden in __init__ when ``separators`` is given.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadow the unbound method with the supplied callable.
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)

        """
        # markers tracks ids of containers on the current encoding path so
        # circular structures raise instead of recursing forever.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the encoder so byte strings are decoded from the
            # configured encoding before being escaped.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials.  Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # Use the C encoder only in the simple one-shot, compact case.
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build the pure-Python ``_iterencode(o, indent_level)`` generator.

    The three nested generators (list, dict, scalar dispatch) close over
    the configuration arguments; the keyword defaults above bind builtins
    as locals purely as a CPython lookup-speed optimization.
    """
    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        # Circular-reference guard: record this container while inside it.
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                buf = separator
            # Scalars are emitted inline with the pending separator; containers
            # flush the separator first, then delegate to a sub-generator.
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            # Coerce non-string keys to their JSON string spelling; keys of
            # unsupported types are skipped or rejected per _skipkeys.
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch on the value's type.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            # Unknown type: let the user's default() convert it, then retry.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| apache-2.0 |
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/lib2to3/pgen2/driver.py | 49 | 4809 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import os
import logging
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
    """Feeds token streams from various sources into a pgen2 parse.Parser."""
    def __init__(self, grammar, convert=None, logger=None):
        """Constructor.

        grammar -- a pgen2 Grammar instance.
        convert -- optional node-conversion callable passed to the parser.
        logger -- optional logging.Logger; defaults to the root logger.
        """
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert
    def parse_tokens(self, tokens, debug=False):
        """Parse a series of tokens and return the syntax tree."""
        # XXX Move the prefix computation into a wrapper around tokenize.
        p = parse.Parser(self.grammar, self.convert)
        p.setup()
        lineno = 1
        column = 0
        type = value = start = end = line_text = None
        # ``prefix`` accumulates whitespace/comments preceding the next
        # significant token so the tree can reproduce the source exactly.
        prefix = ""
        for quintuple in tokens:
            type, value, start, end, line_text = quintuple
            if start != (lineno, column):
                # Tokenizer skipped some text; recover it into the prefix.
                assert (lineno, column) <= start, ((lineno, column), start)
                s_lineno, s_column = start
                if lineno < s_lineno:
                    prefix += "\n" * (s_lineno - lineno)
                    lineno = s_lineno
                    column = 0
                if column < s_column:
                    prefix += line_text[column:s_column]
                    column = s_column
            if type in (tokenize.COMMENT, tokenize.NL):
                # Comments and non-logical newlines become prefix text.
                prefix += value
                lineno, column = end
                if value.endswith("\n"):
                    lineno += 1
                    column = 0
                continue
            if type == token.OP:
                # Map operator strings to their specific grammar token ids.
                type = grammar.opmap[value]
            if debug:
                self.logger.debug("%s %r (prefix=%r)",
                                  token.tok_name[type], value, prefix)
            if p.addtoken(type, value, (prefix, start)):
                # addtoken() returns True once the start symbol is complete.
                if debug:
                    self.logger.debug("Stop.")
                break
            prefix = ""
            lineno, column = end
            if value.endswith("\n"):
                lineno += 1
                column = 0
        else:
            # We never broke out -- EOF is too soon (how can this happen???)
            raise parse.ParseError("incomplete input",
                                   type, value, (prefix, start))
        return p.rootnode
    def parse_stream_raw(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        tokens = tokenize.generate_tokens(stream.readline)
        return self.parse_tokens(tokens, debug)
    def parse_stream(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        return self.parse_stream_raw(stream, debug)
    def parse_file(self, filename, debug=False):
        """Parse a file and return the syntax tree."""
        stream = open(filename)
        try:
            return self.parse_stream(stream, debug)
        finally:
            stream.close()
    def parse_string(self, text, debug=False):
        """Parse a string and return the syntax tree."""
        tokens = tokenize.generate_tokens(generate_lines(text).next)
        return self.parse_tokens(tokens, debug)
def generate_lines(text):
    """Generator that behaves like readline without using StringIO.

    Yields each line of *text* (line endings kept), then "" forever,
    mimicking a file object at EOF.
    """
    pending = text.splitlines(True)
    while pending:
        yield pending.pop(0)
    while True:
        yield ""
def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar (maybe from a pickle).

    gt -- path to the grammar text file.
    gp -- path of the pickle cache; derived from gt and the Python
        version when None.
    save -- write a fresh pickle after regenerating tables.
    force -- regenerate even if the pickle is up to date.
    """
    if logger is None:
        logger = logging.getLogger()
    if gp is None:
        # Derive a version-specific pickle name next to the grammar file.
        head, tail = os.path.splitext(gt)
        if tail == ".txt":
            tail = ""
        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
    if force or not _newer(gp, gt):
        # Pickle is missing or stale: regenerate the tables from source.
        logger.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            logger.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            except IOError, e:
                # Best-effort cache write; a failure is not fatal.
                logger.info("Writing failed:"+str(e))
    else:
        g = grammar.Grammar()
        g.load(gp)
    return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
| apache-2.0 |
anksp21/Community-Zenpacks | ZenPacks.jschroeder.GangliaMonitor/ZenPacks/jschroeder/GangliaMonitor/datasources/GangliaMonitorDataSource.py | 3 | 3746 | ###########################################################################
#
# Copyright (C) 2010, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
__doc__='''GangliaMonitorDataSource.py
Defines datasource for GangliaMonitor
'''
import Products.ZenModel.BasicDataSource as BasicDataSource
from Products.ZenModel.ZenPackPersistence import ZenPackPersistence
from AccessControl import ClassSecurityInfo, Permissions
from Products.ZenUtils.ZenTales import talesCompile, getEngine
import os
class GangliaMonitorDataSource(ZenPackPersistence,
                                BasicDataSource.BasicDataSource):
    """Zenoss datasource that polls Ganglia metrics via check_ganglia.py."""
    GANGLIA_MONITOR = 'GangliaMonitor'
    ZENPACKID = 'ZenPacks.jschroeder.GangliaMonitor'
    sourcetypes = (GANGLIA_MONITOR,)
    sourcetype = GANGLIA_MONITOR
    # Command timeout in seconds.
    timeout = 60
    eventClass = '/Status/Ganglia'
    # TALES expressions resolved per-device at collection time.
    host = '${dev/zGangliaHost}'
    port = '${dev/zGangliaPort}'
    _properties = BasicDataSource.BasicDataSource._properties + (
        {'id':'timeout', 'type':'int', 'mode':'w'},
        {'id':'eventClass', 'type':'string', 'mode':'w'},
        {'id':'host', 'type':'string', 'mode':'w'},
        {'id':'port', 'type':'string', 'mode':'w'},
        )
    _relations = BasicDataSource.BasicDataSource._relations + (
        )
    # ZMI view/action wiring for the datasource edit screen.
    factory_type_information = (
    {
        'immediate_view': 'editGangliaMonitorDataSource',
        'actions':
        (
            { 'id': 'edit',
              'name': 'Data Source',
              'action': 'editGangliaMonitorDataSource',
              'permissions': ( Permissions.view ),
            },
        )
    },
    )
    security = ClassSecurityInfo()
    def __init__(self, id, title=None, buildRelations=True):
        BasicDataSource.BasicDataSource.__init__(self, id, title,
                buildRelations)
    def getDescription(self):
        """Return a short description of this datasource for the UI."""
        if self.sourcetype == self.GANGLIA_MONITOR:
            # NOTE(review): ``hostname`` is not defined on this class (only
            # ``host``/``port`` are) -- presumably acquired from a parent
            # object; confirm, or this may be a typo for self.host.
            return self.hostname
        return BasicDataSource.BasicDataSource.getDescription(self)
    def useZenCommand(self):
        # Collected via zencommand rather than SNMP/other collectors.
        return True
    def getCommand(self, context):
        """Build the check_ganglia.py command line for *context*."""
        parts = ['check_ganglia.py', self.host, self.port, '${dev/manageIp}', str(self.cycletime)]
        cmd = ' '.join(parts)
        cmd = BasicDataSource.BasicDataSource.getCommand(self, context, cmd)
        return cmd
    def checkCommandPrefix(self, context, cmd):
        # Resolve the script relative to this ZenPack's libexec directory.
        zp = self.getZenPack(context)
        return zp.path('libexec', cmd)
    def addDataPoints(self):
        """Create a GAUGE datapoint for every metric the script reports."""
        for dpname in ('bytes_in', 'bytes_out', 'cpu_idle', 'cpu_nice',
                        'cpu_system', 'cpu_user', 'cpu_wio', 'disk_free',
                        'disk_total', 'lastUpdate', 'load_fifteen', 'load_five',
                        'load_one', 'mem_buffers', 'mem_cached', 'mem_free',
                        'mem_shared', 'mem_total', 'pkts_in', 'pkts_out',
                        'proc_run', 'proc_total', 'swap_free', 'swap_total'):
            dp = self.manage_addRRDDataPoint(dpname)
            dp.rrdtype = 'GAUGE'
            dp.rrdmin = 0
    def zmanage_editProperties(self, REQUEST=None):
        '''validation, etc'''
        if REQUEST:
            self.addDataPoints()
            # Fall back to the class default when no event class was chosen.
            if not REQUEST.form.get('eventClass', None):
                REQUEST.form['eventClass'] = self.__class__.eventClass
        return BasicDataSource.BasicDataSource.zmanage_editProperties(self,
                REQUEST)
| gpl-2.0 |
s40223154/2017springvcp_hw | plugin/liquid_tags/test_notebook.py | 311 | 3042 | import re
from pelican.tests.support import unittest
from . import notebook
class TestNotebookTagRegex(unittest.TestCase):
    """Exercise notebook.FORMAT parsing of the {% notebook %} tag markup."""
    def get_argdict(self, markup):
        """Return (src, start, end, language) parsed from *markup*.

        Returns None when the markup does not match notebook.FORMAT.
        """
        match = notebook.FORMAT.search(markup)
        if match:
            argdict = match.groupdict()
            src = argdict['src']
            start = argdict['start']
            end = argdict['end']
            language = argdict['language']
            return src, start, end, language
        return None
    def test_basic_notebook_tag(self):
        """Bare path: only src is captured."""
        markup = u'path/to/thing.ipynb'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertIsNone(language)
    def test_basic_notebook_tag_insensitive_to_whitespace(self):
        """Surrounding whitespace must not leak into the captured path."""
        markup = u'   path/to/thing.ipynb '
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertIsNone(language)
    def test_notebook_tag_with_cells(self):
        """cells[a:b] populates the start and end captures."""
        markup = u'path/to/thing.ipynb cells[1:5]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertIsNone(language)
    def test_notebook_tag_with_alphanumeric_language(self):
        """language[...] accepts alphanumeric names."""
        markup = u'path/to/thing.ipynb language[python3]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertEqual(language, u'python3')
    def test_notebook_tag_with_symbol_in_name_language(self):
        """language[...] accepts names containing +, - and similar symbols."""
        for short_name in [u'c++', u'cpp-objdump', u'c++-objdumb', u'cxx-objdump']:
            markup = u'path/to/thing.ipynb language[{}]'.format(short_name)
            src, start, end, language = self.get_argdict(markup)
            self.assertEqual(src, u'path/to/thing.ipynb')
            self.assertIsNone(start)
            self.assertIsNone(end)
            self.assertEqual(language, short_name)
    def test_notebook_tag_with_language_and_cells(self):
        """cells[...] and language[...] may be combined."""
        markup = u'path/to/thing.ipynb cells[1:5] language[julia]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertEqual(language, u'julia')
    def test_notebook_tag_with_language_and_cells_and_weird_spaces(self):
        """Extra whitespace between arguments is tolerated."""
        markup = u'   path/to/thing.ipynb   cells[1:5]  language[julia]    '
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertEqual(language, u'julia')
unittest.main() | agpl-3.0 |
jabesq/home-assistant | homeassistant/components/bmw_connected_drive/binary_sensor.py | 7 | 8099 | """Reads vehicle status from BMW connected drive portal."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import LENGTH_KILOMETERS
from . import DOMAIN as BMW_DOMAIN
_LOGGER = logging.getLogger(__name__)
# Attribute key -> (friendly sensor name, Home Assistant device_class)
# for every vehicle, regardless of drivetrain.
SENSOR_TYPES = {
    'lids': ['Doors', 'opening'],
    'windows': ['Windows', 'opening'],
    'door_lock_state': ['Door lock state', 'safety'],
    'lights_parking': ['Parking lights', 'light'],
    'condition_based_services': ['Condition based services', 'problem'],
    'check_control_messages': ['Control messages', 'problem']
}
# Additional sensors for vehicles with a high-voltage battery; merged with
# the common set below.
SENSOR_TYPES_ELEC = {
    'charging_status': ['Charging status', 'power'],
    'connection_status': ['Connection status', 'plug']
}
SENSOR_TYPES_ELEC.update(SENSOR_TYPES)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the BMW sensors.

    Creates one BMWConnectedDriveSensor per (vehicle, sensor type): electric
    vehicles get the extended SENSOR_TYPES_ELEC set, combustion vehicles the
    base SENSOR_TYPES set; other vehicles get no sensors.
    """
    accounts = hass.data[BMW_DOMAIN]
    _LOGGER.debug('Found BMW accounts: %s',
                  ', '.join([a.name for a in accounts]))
    devices = []
    for account in accounts:
        for vehicle in account.account.vehicles:
            # Pick the sensor set once instead of duplicating the
            # device-creation loop per drivetrain.
            if vehicle.has_hv_battery:
                _LOGGER.debug('BMW with a high voltage battery')
                sensor_types = SENSOR_TYPES_ELEC
            elif vehicle.has_internal_combustion_engine:
                _LOGGER.debug('BMW with an internal combustion engine')
                sensor_types = SENSOR_TYPES
            else:
                continue
            for key, (sensor_name, device_class) in sorted(
                    sensor_types.items()):
                devices.append(BMWConnectedDriveSensor(
                    account, vehicle, key, sensor_name, device_class))
    add_entities(devices, True)
class BMWConnectedDriveSensor(BinarySensorDevice):
    """Representation of a BMW vehicle binary sensor."""
    def __init__(self, account, vehicle, attribute: str, sensor_name,
                 device_class):
        """Constructor.

        account -- the BMWConnectedDriveAccount wrapper this sensor belongs to.
        vehicle -- the bimmer_connected vehicle object to read state from.
        attribute -- which vehicle attribute this sensor reflects (a key
            of SENSOR_TYPES / SENSOR_TYPES_ELEC).
        sensor_name -- human-readable sensor name.
        device_class -- Home Assistant binary sensor device class.
        """
        self._account = account
        self._vehicle = vehicle
        self._attribute = attribute
        self._name = '{} {}'.format(self._vehicle.name, self._attribute)
        self._unique_id = '{}-{}'.format(self._vehicle.vin, self._attribute)
        self._sensor_name = sensor_name
        self._device_class = device_class
        self._state = None
    @property
    def should_poll(self) -> bool:
        """Return False.

        Data update is triggered from BMWConnectedDriveEntity.
        """
        return False
    @property
    def unique_id(self):
        """Return the unique ID of the binary sensor."""
        return self._unique_id
    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self._name
    @property
    def device_class(self):
        """Return the class of the binary sensor."""
        return self._device_class
    @property
    def is_on(self):
        """Return the state of the binary sensor."""
        return self._state
    @property
    def device_state_attributes(self):
        """Return the state attributes of the binary sensor.

        The attribute payload depends on which vehicle attribute this
        sensor represents (per-door states, service reports, etc.).
        """
        vehicle_state = self._vehicle.state
        result = {
            'car': self._vehicle.name
        }
        if self._attribute == 'lids':
            # One entry per door/lid with its open/closed state.
            for lid in vehicle_state.lids:
                result[lid.name] = lid.state.value
        elif self._attribute == 'windows':
            for window in vehicle_state.windows:
                result[window.name] = window.state.value
        elif self._attribute == 'door_lock_state':
            result['door_lock_state'] = vehicle_state.door_lock_state.value
            result['last_update_reason'] = vehicle_state.last_update_reason
        elif self._attribute == 'lights_parking':
            result['lights_parking'] = vehicle_state.parking_lights.value
        elif self._attribute == 'condition_based_services':
            for report in vehicle_state.condition_based_services:
                result.update(
                    self._format_cbs_report(report))
        elif self._attribute == 'check_control_messages':
            check_control_messages = vehicle_state.check_control_messages
            if not check_control_messages:
                result['check_control_messages'] = 'OK'
            else:
                cbs_list = []
                for message in check_control_messages:
                    cbs_list.append(message['ccmDescriptionShort'])
                result['check_control_messages'] = cbs_list
        elif self._attribute == 'charging_status':
            result['charging_status'] = vehicle_state.charging_status.value
            # pylint: disable=protected-access
            result['last_charging_end_result'] = \
                vehicle_state._attributes['lastChargingEndResult']
        if self._attribute == 'connection_status':
            # pylint: disable=protected-access
            result['connection_status'] = \
                vehicle_state._attributes['connectionStatus']
        return sorted(result.items())
    def update(self):
        """Read new state data from the library.

        Maps the raw vehicle attribute onto the binary on/off semantics of
        this sensor's device class (see the comments per branch).
        """
        from bimmer_connected.state import LockState
        from bimmer_connected.state import ChargingState
        vehicle_state = self._vehicle.state
        # device class opening: On means open, Off means closed
        if self._attribute == 'lids':
            _LOGGER.debug("Status of lid: %s", vehicle_state.all_lids_closed)
            self._state = not vehicle_state.all_lids_closed
        if self._attribute == 'windows':
            self._state = not vehicle_state.all_windows_closed
        # device class safety: On means unsafe, Off means safe
        if self._attribute == 'door_lock_state':
            # Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
            self._state = vehicle_state.door_lock_state not in \
                [LockState.LOCKED, LockState.SECURED]
        # device class light: On means light detected, Off means no light
        if self._attribute == 'lights_parking':
            self._state = vehicle_state.are_parking_lights_on
        # device class problem: On means problem detected, Off means no problem
        if self._attribute == 'condition_based_services':
            self._state = not vehicle_state.are_all_cbs_ok
        if self._attribute == 'check_control_messages':
            self._state = vehicle_state.has_check_control_messages
        # device class power: On means power detected, Off means no power
        if self._attribute == 'charging_status':
            self._state = vehicle_state.charging_status in \
                [ChargingState.CHARGING]
        # device class plug: On means device is plugged in,
        # Off means device is unplugged
        if self._attribute == 'connection_status':
            # pylint: disable=protected-access
            self._state = (vehicle_state._attributes['connectionStatus'] ==
                           'CONNECTED')
    def _format_cbs_report(self, report):
        """Flatten one condition-based-service report into attribute entries."""
        result = {}
        service_type = report.service_type.lower().replace('_', ' ')
        result['{} status'.format(service_type)] = report.state.value
        if report.due_date is not None:
            result['{} date'.format(service_type)] = \
                report.due_date.strftime('%Y-%m-%d')
        if report.due_distance is not None:
            # Convert to the unit system configured in Home Assistant.
            distance = round(self.hass.config.units.length(
                report.due_distance, LENGTH_KILOMETERS))
            result['{} distance'.format(service_type)] = '{} {}'.format(
                distance, self.hass.config.units.length_unit)
        return result
    def update_callback(self):
        """Schedule a state update."""
        self.schedule_update_ha_state(True)
    async def async_added_to_hass(self):
        """Add callback after being added to hass.

        Show latest data after startup.
        """
        self._account.add_update_listener(self.update_callback)
| apache-2.0 |
mglukhikh/intellij-community | python/lib/Lib/site-packages/django/contrib/formtools/tests.py | 89 | 6613 | import unittest
from django import forms
from django.contrib.formtools import preview, wizard, utils
from django import http
from django.test import TestCase
# Marker body returned by done(); tests assert on it to prove done() ran.
success_string = "Done was called!"
class TestFormPreview(preview.FormPreview):
    """FormPreview subclass whose done() emits a recognizable response."""
    def done(self, request, cleaned_data):
        return http.HttpResponse(success_string)
class TestForm(forms.Form):
    """Minimal form used by the preview tests; field1_ exercises the
    trailing-underscore name-mangling path, bool1 the unchecked-boolean
    hashing path."""
    field1 = forms.CharField()
    field1_ = forms.CharField()
    bool1 = forms.BooleanField(required=False)
class PreviewTests(TestCase):
    urls = 'django.contrib.formtools.test_urls'
    def setUp(self):
        # Create a FormPreview instance to share between tests
        self.preview = preview.FormPreview(TestForm)
        input_template = '<input type="hidden" name="%s" value="%s" />'
        # "%d" placeholder is filled with the expected stage number per test.
        self.input = input_template % (self.preview.unused_name('stage'), "%d")
        self.test_data = {'field1':u'foo', 'field1_':u'asdf'}
    def test_unused_name(self):
        """
        Verifies name mangling to get unique field name.
        """
        self.assertEqual(self.preview.unused_name('field1'), 'field1__')
    def test_form_get(self):
        """
        Test contrib.formtools.preview form retrieval.
        Use the client library to see if we can successfully retrieve
        the form (mostly testing the setup ROOT_URLCONF
        process). Verify that an additional hidden input field
        is created to manage the stage.
        """
        response = self.client.get('/test1/')
        stage = self.input % 1
        self.assertContains(response, stage, 1)
    def test_form_preview(self):
        """
        Test contrib.formtools.preview form preview rendering.
        Use the client library to POST to the form to see if a preview
        is returned.  If we do get a form back check that the hidden
        value is correctly managing the state of the form.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 1})
        response = self.client.post('/test1/', self.test_data)
        # Check to confirm stage is set to 2 in output form.
        stage = self.input % 2
        self.assertContains(response, stage, 1)
    def test_form_submit(self):
        """
        Test contrib.formtools.preview form submittal.
        Use the client library to POST to the form with stage set to 3
        to see if our forms done() method is called. Check first
        without the security hash, verify failure, retry with security
        hash and verify success.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage':2})
        response = self.client.post('/test1/', self.test_data)
        self.failIfEqual(response.content, success_string)
        hash = self.preview.security_hash(None, TestForm(self.test_data))
        self.test_data.update({'hash': hash})
        response = self.client.post('/test1/', self.test_data)
        self.assertEqual(response.content, success_string)
    def test_bool_submit(self):
        """
        Test contrib.formtools.preview form submittal when form contains:
        BooleanField(required=False)
        Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
        form's hash would be computed with no value for ``bool1``. However, when
        the preview form is rendered, the unchecked hidden BooleanField would be
        rendered with the string value 'False'. So when the preview form is
        resubmitted, the hash would be computed with the value 'False' for
        ``bool1``. We need to make sure the hashes are the same in both cases.
        """
        self.test_data.update({'stage':2})
        hash = self.preview.security_hash(None, TestForm(self.test_data))
        self.test_data.update({'hash':hash, 'bool1':u'False'})
        response = self.client.post('/test1/', self.test_data)
        self.assertEqual(response.content, success_string)
class SecurityHashTests(unittest.TestCase):
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Nothing notable.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Nothing notable. '})
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
class HashTestForm(forms.Form):
name = forms.CharField()
bio = forms.CharField()
class HashTestBlankForm(forms.Form):
name = forms.CharField(required=False)
bio = forms.CharField(required=False)
#
# FormWizard tests
#
class WizardPageOneForm(forms.Form):
field = forms.CharField()
class WizardPageTwoForm(forms.Form):
field = forms.CharField()
class WizardClass(wizard.FormWizard):
def render_template(self, *args, **kw):
return http.HttpResponse("")
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
class DummyRequest(http.HttpRequest):
    """Minimal HttpRequest stand-in for driving a FormWizard in tests."""
    def __init__(self, POST=None):
        super(DummyRequest, self).__init__()
        # Presence of POST data implies a POST request, otherwise GET.
        self.method = POST and "POST" or "GET"
        if POST is not None:
            self.POST.update(POST)
        # Skip CSRF enforcement so the wizard can be called directly.
        self._dont_enforce_csrf_checks = True
class WizardTests(TestCase):
def test_step_starts_at_zero(self):
"""
step should be zero for the first form
"""
wizard = WizardClass([WizardPageOneForm, WizardPageTwoForm])
request = DummyRequest()
wizard(request)
self.assertEquals(0, wizard.step)
def test_step_increments(self):
"""
step should be incremented when we go to the next page
"""
wizard = WizardClass([WizardPageOneForm, WizardPageTwoForm])
request = DummyRequest(POST={"0-field":"test", "wizard_step":"0"})
response = wizard(request)
self.assertEquals(1, wizard.step)
| apache-2.0 |
elainexmas/boto | tests/unit/ec2/test_instancetype.py | 114 | 5221 | #!/usr/bin/env python
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
import boto.ec2
from boto.ec2.connection import EC2Connection
class TestEC2ConnectionBase(AWSMockServiceTestCase):
    """Base test case exposing the mocked EC2 service connection as ``self.ec2``."""
    connection_class = EC2Connection

    def setUp(self):
        super(TestEC2ConnectionBase, self).setUp()
        # Alias the mock connection under the shorter name the tests use.
        self.ec2 = self.service_connection
class TestReservedInstanceOfferings(TestEC2ConnectionBase):
def default_body(self):
return b"""
<DescribeInstanceTypesResponseType xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<VmTypeMessage/>
<instanceTypeDetails>
<item>
<name>m1.small</name><cpu>1</cpu><disk>5</disk><memory>256</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>t1.micro</name><cpu>1</cpu><disk>5</disk><memory>256</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m1.medium</name><cpu>1</cpu><disk>10</disk><memory>512</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>c1.medium</name><cpu>2</cpu><disk>10</disk><memory>512</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m1.large</name><cpu>2</cpu><disk>10</disk><memory>512</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m1.xlarge</name><cpu>2</cpu><disk>10</disk><memory>1024</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>c1.xlarge</name><cpu>2</cpu><disk>10</disk><memory>2048</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m2.xlarge</name><cpu>2</cpu><disk>10</disk><memory>2048</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m3.xlarge</name><cpu>4</cpu><disk>15</disk><memory>2048</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m2.2xlarge</name><cpu>2</cpu><disk>30</disk><memory>4096</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m3.2xlarge</name><cpu>4</cpu><disk>30</disk><memory>4096</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>cc1.4xlarge</name><cpu>8</cpu><disk>60</disk><memory>3072</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>m2.4xlarge</name><cpu>8</cpu><disk>60</disk><memory>4096</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>hi1.4xlarge</name><cpu>8</cpu><disk>120</disk><memory>6144</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>cc2.8xlarge</name><cpu>16</cpu><disk>120</disk><memory>6144</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>cg1.4xlarge</name><cpu>16</cpu><disk>200</disk><memory>12288</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>cr1.8xlarge</name><cpu>16</cpu><disk>240</disk><memory>16384</memory>
<availability/><ephemeralDisk/>
</item>
<item>
<name>hs1.8xlarge</name><cpu>48</cpu><disk>24000</disk><memory>119808</memory>
<availability/><ephemeralDisk/>
</item>
</instanceTypeDetails>
</DescribeInstanceTypesResponseType>
"""
def test_get_instance_types(self):
self.set_http_response(status_code=200)
response = self.ec2.get_all_instance_types()
self.assertEqual(len(response), 18)
instance_type = response[0]
self.assertEqual(instance_type.name, 'm1.small')
self.assertEqual(instance_type.cores, '1')
self.assertEqual(instance_type.disk, '5')
self.assertEqual(instance_type.memory, '256')
instance_type = response[17]
self.assertEqual(instance_type.name, 'hs1.8xlarge')
self.assertEqual(instance_type.cores, '48')
self.assertEqual(instance_type.disk, '24000')
self.assertEqual(instance_type.memory, '119808')
if __name__ == '__main__':
unittest.main()
| mit |
lucafavatella/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/polygon.py | 321 | 6506 | from ctypes import c_uint, byref
from django.contrib.gis.geos.error import GEOSIndexError
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOM_PTR
from django.contrib.gis.geos.linestring import LinearRing
from django.contrib.gis.geos import prototypes as capi
class Polygon(GEOSGeometry):
_minlength = 1
def __init__(self, *args, **kwargs):
"""
Initializes on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
Example where a tuple parameters are used:
>>> poly = Polygon(((0, 0), (0, 10), (10, 10), (0, 10), (0, 0)),
((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.')
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring = args[0]
init_holes = args[1:]
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if len(init_holes[0]) == 0:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes)
super(Polygon, self).__init__(polygon, **kwargs)
def __iter__(self):
"Iterates over each ring in the polygon."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of rings in this Polygon."
return self.num_interior_rings + 1
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return GEOSGeometry( 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0) )
### These routines are needed for list-like operation w/ListMixin ###
def _create_polygon(self, length, items):
# Instantiate LinearRing objects if necessary, but don't clone them yet
# _construct_ring will throw a TypeError if a parameter isn't a valid ring
# If we cloned the pointers here, we wouldn't be able to clean up
# in case of error.
rings = []
for r in items:
if isinstance(r, GEOM_PTR):
rings.append(r)
else:
rings.append(self._construct_ring(r))
shell = self._clone(rings.pop(0))
n_holes = length - 1
if n_holes:
holes = get_pointer_arr(n_holes)
for i, r in enumerate(rings):
holes[i] = self._clone(r)
holes_param = byref(holes)
else:
holes_param = None
return capi.create_polygon(shell, holes_param, c_uint(n_holes))
def _clone(self, g):
if isinstance(g, GEOM_PTR):
return capi.geom_clone(g)
else:
return capi.geom_clone(g.ptr)
def _construct_ring(self, param, msg='Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings'):
"Helper routine for trying to construct a ring from the given parameter."
if isinstance(param, LinearRing): return param
try:
ring = LinearRing(param)
return ring
except TypeError:
raise TypeError(msg)
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid: self.srid = srid
capi.destroy_geom(prev_ptr)
def _get_single_internal(self, index):
"""
Returns the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
_get_single_internal returns a pointer from the existing geometries for use
internally by the object's methods. _get_single_external returns a clone
of the same geometry for use by external code.
"""
if index == 0:
return capi.get_extring(self.ptr)
else:
# Getting the interior ring, have to subtract 1 from the index.
return capi.get_intring(self.ptr, index-1)
def _get_single_external(self, index):
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
#### Polygon Properties ####
@property
def num_interior_rings(self):
"Returns the number of interior rings."
# Getting the number of rings
return capi.get_nrings(self.ptr)
def _get_ext_ring(self):
"Gets the exterior ring of the Polygon."
return self[0]
def _set_ext_ring(self, ring):
"Sets the exterior ring of the Polygon."
self[0] = ring
# Properties for the exterior ring/shell.
exterior_ring = property(_get_ext_ring, _set_ext_ring)
shell = exterior_ring
@property
def tuple(self):
"Gets the tuple for each ring in this Polygon."
return tuple([self[i].tuple for i in xrange(len(self))])
coords = tuple
@property
def kml(self):
"Returns the KML representation of this Polygon."
inner_kml = ''.join(["<innerBoundaryIs>%s</innerBoundaryIs>" % self[i+1].kml
for i in xrange(self.num_interior_rings)])
return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
| apache-2.0 |
Hossein-Noroozpour/PyHDM | hml/classification/HNearestNeighborsClassifier.py | 1 | 2014 | #!/usr/bin/python3.3
# coding=utf-8
"""
Module for K nearest neighbours.
"""
__author__ = 'Hossein Noroozpour Thany Abady'
#from math3d import sqrt
import numpy
class HNearestNeighboursClassifier():
    """
    K nearest neighbours classifier.

    Distances from a query point to every training sample are converted to
    weights via *weight_function*; the *n_neighbors* samples with the largest
    weights (i.e. the nearest samples) cast one vote each for their class,
    and the majority class is predicted.
    """
    def __init__(self, n_neighbors=5, weight_function=lambda l: [1. / (d + .0001) for d in l], weight_name='i'):
        self.n_neighbors = n_neighbors
        self.weight_function = weight_function
        self.train = None
        self.target = None
        self.weight_name = weight_name

    def fit(self, train, target):
        """
        Store the training samples and their labels.

        :param train: sequence of feature vectors
        :param target: sequence of class labels, parallel to *train*
        :return: self, allowing ``fit(...).predict(...)`` chaining
        """
        self.train = numpy.array(train)
        self.target = target
        return self

    def predict(self, test):
        """
        Predict a class label for every sample in *test*.

        :param test: sequence of feature vectors
        :return: list of predicted labels, parallel to *test*
        """
        result = []
        test = numpy.array(test)
        for t in test:
            # Euclidean distance from t to every training sample.
            # (Fix: the original called an undefined `sqrt`; the
            # `from math3d import sqrt` import was commented out.)
            distances = []
            for r in self.train:
                d = r - t
                distances.append(numpy.sqrt(d.dot(d)))
            weights = self.weight_function(distances)
            wc = [(weights[i], self.target[i]) for i in range(len(self.target))]
            # Largest weight first == nearest neighbour first.
            wc.sort(key=lambda tup: tup[0], reverse=True)
            votes = dict()
            for i in range(min(self.n_neighbors, len(wc))):
                label = wc[i][1]
                votes[label] = votes.get(label, 0) + 1
            # Majority vote.  (Fix: the original never updated its running
            # maximum, so it effectively returned the last label iterated
            # rather than the most frequent one.)
            best_count = -1
            prediction = 0
            for label, count in votes.items():
                if count >= best_count:
                    best_count = count
                    prediction = label
            result.append(prediction)
        return result

    def __str__(self):
        return 'K nearest neighbors classifier with n=' + str(self.n_neighbors) + ' and weight=' + str(self.weight_name)

    def score(self, x, y):
        """
        Return the accuracy of predictions on *x* against true labels *y*.

        :param x: sequence of feature vectors
        :param y: sequence of true labels, parallel to *x*
        :return: fraction of correct predictions in [0, 1]
        """
        p = self.predict(x)
        c = 0
        for i in range(len(y)):
            if p[i] == y[i]:
                c += 1
        return float(c) / float(len(y))
TridevGuha/pywikibot-core | tests/weblinkchecker_tests.py | 4 | 2724 | # -*- coding: utf-8 -*-
"""weblinkchecker test module."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
import datetime
from pywikibot.tools import PY2
if not PY2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
from scripts import weblinkchecker
from tests.aspects import unittest, TestCase, TestCaseBase
from tests import weblib_tests
class MementoTestBase(TestCaseBase):
"""Test memento client."""
@classmethod
def setUpClass(cls):
"""Set up test class."""
if isinstance(weblinkchecker.memento_client, ImportError):
raise unittest.SkipTest('memento_client not imported')
super(MementoTestBase, cls).setUpClass()
def _get_archive_url(self, url, date_string=None):
if date_string is None:
when = datetime.datetime.now()
else:
when = datetime.datetime.strptime(date_string, '%Y%m%d')
return weblinkchecker._get_closest_memento_url(
url,
when,
self.timegate_uri)
class WeblibTestMementoInternetArchive(MementoTestBase, weblib_tests.TestInternetArchive):
"""Test InternetArchive Memento using old weblib tests."""
timegate_uri = 'http://web.archive.org/web/'
hostname = timegate_uri
class WeblibTestMementoWebCite(MementoTestBase, weblib_tests.TestWebCite):
"""Test WebCite Memento using old weblib tests."""
timegate_uri = 'http://timetravel.mementoweb.org/webcite/timegate/'
hostname = timegate_uri
class TestMementoWebCite(MementoTestBase):
"""New WebCite Memento tests."""
timegate_uri = 'http://timetravel.mementoweb.org/webcite/timegate/'
hostname = timegate_uri
def test_newest(self):
"""Test WebCite for newest https://google.com."""
archivedversion = self._get_archive_url('https://google.com')
parsed = urlparse(archivedversion)
self.assertIn(parsed.scheme, ['http', 'https'])
self.assertEqual(parsed.netloc, 'www.webcitation.org')
class TestMementoDefault(MementoTestBase, TestCase):
"""Test InternetArchive is default Memento timegate."""
timegate_uri = None
net = True
def test_newest(self):
"""Test getting memento for newest https://google.com."""
archivedversion = self._get_archive_url('https://google.com')
self.assertIsNotNone(archivedversion)
def test_invalid(self):
"""Test getting memento for invalid URL."""
self.assertRaises(Exception, self._get_archive_url, 'invalid')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| mit |
birdonwheels5/p2pool-myrScrypt | SOAPpy/NS.py | 289 | 3724 | from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: NS.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
##############################################################################
# Namespace Class
################################################################################
def invertDict(dict):
    """Return a new dictionary mapping each value of *dict* back to its key."""
    return {value: key for key, value in dict.items()}
class NS:
XML = "http://www.w3.org/XML/1998/namespace"
ENV = "http://schemas.xmlsoap.org/soap/envelope/"
ENC = "http://schemas.xmlsoap.org/soap/encoding/"
XSD = "http://www.w3.org/1999/XMLSchema"
XSD2 = "http://www.w3.org/2000/10/XMLSchema"
XSD3 = "http://www.w3.org/2001/XMLSchema"
XSD_L = [XSD, XSD2, XSD3]
EXSD_L= [ENC, XSD, XSD2, XSD3]
XSI = "http://www.w3.org/1999/XMLSchema-instance"
XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
XSI_L = [XSI, XSI2, XSI3]
URN = "http://soapinterop.org/xsd"
# For generated messages
XML_T = "xml"
ENV_T = "SOAP-ENV"
ENC_T = "SOAP-ENC"
XSD_T = "xsd"
XSD2_T= "xsd2"
XSD3_T= "xsd3"
XSI_T = "xsi"
XSI2_T= "xsi2"
XSI3_T= "xsi3"
URN_T = "urn"
NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
URN_T: URN}
NSMAP_R = invertDict(NSMAP)
STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
'2001': (XSD3_T, XSI3_T)}
STMAP_R = invertDict(STMAP)
def __init__(self):
raise Error, "Don't instantiate this"
| gpl-3.0 |
ndexbio/ndex-enrich | Persistence_Unit_Tests.py | 1 | 5444 | __author__ = 'aarongary'
import unittest
from persistence import EnrichmentPersistence
class Dev_Uint_Tests(unittest.TestCase):
def test_persistence(self):
try:
my_json = {
'termClassification': [{
'status': 'unknown',
'geneSymbol': '',
'termId': 'RATTUS',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '1.0'
},
'desc': ''
}, {
'status': 'success',
'geneSymbol': 'ENSG00000230855',
'termId': 'OR2J3',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'olfactory receptor, family 2, subfamily J, member 3 [Source:HGNC Symbol;Acc:HGNC:8261]'
}, {
'status': 'success',
'geneSymbol': 'ENSG00000129673',
'termId': 'AANAT',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'aralkylamine N-acetyltransferase [Source:HGNC Symbol;Acc:HGNC:19]'
}, {
'status': 'success',
'geneSymbol': '',
'termId': 'LYMPHATIC',
'probabilitiesMap': {
'icd10': '1.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': ''
}, {
'status': 'success',
'geneSymbol': 'ENSG00000163749',
'termId': 'CCDC158',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'coiled-coil domain containing 158 [Source:HGNC Symbol;Acc:HGNC:26374]'
}, {
'status': 'success',
'geneSymbol': 'ENSG00000173261',
'termId': 'PLAC8L1',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'PLAC8-like 1 [Source:HGNC Symbol;Acc:HGNC:31746]'
}, {
'status': 'success',
'geneSymbol': '',
'termId': 'CAFFEINE',
'probabilitiesMap': {
'icd10': '0.5',
'gene': '0.0',
'disease': '0.0',
'drug': '0.5',
'genome': '0.0'
},
'desc': ''
}, {
'status': 'success',
'geneSymbol': '',
'termId': 'HUMAN',
'probabilitiesMap': {
'icd10': '1.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': ''
}, {
'status': 'unknown',
'geneSymbol': '',
'termId': 'ASLFDKJDS',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': ''
}]
}
ep = EnrichmentPersistence()
ep.save_file(my_json, 'my_test_6')
ep.save_file(my_json, 'my_test_7')
ep.save_file(my_json, 'my_test_8')
ep.save_file(my_json, 'my_test_9')
ep.save_file(my_json, 'my_test_10')
print ep.get_file('my_test_8')
self.assertTrue(1 == 1)
except Exception as e:
print e.message
self.assertTrue(1 == 0)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
40223139/39g7test | static/Brython3.1.0-20150301-090019/Lib/xml/etree/__init__.py | 1200 | 1604 | # $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
| gpl-3.0 |
coldmind/django | django/template/loaders/base.py | 4 | 3864 | import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.inspect import func_supports_parameter
class Loader(object):
# Only used to raise a deprecation warning. Remove in Django 2.0.
_accepts_engine_in_init = True
def __init__(self, engine):
self.engine = engine
def __call__(self, template_name, template_dirs=None):
# RemovedInDjango21Warning: Allow loaders to be called like functions.
return self.load_template(template_name, template_dirs)
def get_template(self, template_name, template_dirs=None, skip=None):
"""
Calls self.get_template_sources() and returns a Template object for
the first template matching template_name. If skip is provided,
template origins in skip are ignored. This is used to avoid recursion
during template extending.
"""
tried = []
args = [template_name]
# RemovedInDjango21Warning: Add template_dirs for compatibility with
# old loaders
if func_supports_parameter(self.get_template_sources, 'template_dirs'):
args.append(template_dirs)
for origin in self.get_template_sources(*args):
if skip is not None and origin in skip:
tried.append((origin, 'Skipped'))
continue
try:
contents = self.get_contents(origin)
except TemplateDoesNotExist:
tried.append((origin, 'Source does not exist'))
continue
else:
return Template(
contents, origin, origin.template_name, self.engine,
)
raise TemplateDoesNotExist(template_name, tried=tried)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango21Warning,
)
source, display_name = self.load_template_source(
template_name, template_dirs,
)
origin = Origin(
name=display_name,
template_name=template_name,
loader=self,
)
try:
template = Template(source, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the
# template we were asked to load. This allows for correct
# identification of the actual template that does not exist.
return source, display_name
else:
return template, None
def get_template_sources(self, template_name):
"""
An iterator that yields possible matching template paths for a
template name.
"""
raise NotImplementedError(
'subclasses of Loader must provide a get_template_sources() method'
)
def load_template_source(self, template_name, template_dirs=None):
"""
RemovedInDjango21Warning: Returns a tuple containing the source and
origin for the given template name.
"""
raise NotImplementedError(
'subclasses of Loader must provide a load_template_source() method'
)
def reset(self):
"""
Resets any state maintained by the loader instance (e.g. cached
templates or cached loader modules).
"""
pass
@property
def supports_recursion(self):
"""
RemovedInDjango21Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return hasattr(self, 'get_contents')
| bsd-3-clause |
minhphung171093/GreenERP | openerp/addons/point_of_sale/wizard/pos_discount.py | 44 | 1320 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv, fields
class pos_discount(osv.osv_memory):
    """Transient wizard applying a global percentage discount to a POS order."""
    _name = 'pos.discount'
    _description = 'Add a Global Discount'
    _columns = {
        # Percentage discount written onto each order line.
        'discount': fields.float('Discount (%)', required=True, digits=(16,2)),
    }
    _defaults = {
        'discount': 5,
    }
    def apply_discount(self, cr, uid, ids, context=None):
        """
        Write the wizard's discount percentage onto every line of the
        active POS order(s).
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary; ``context['active_id']``
            identifies the pos.order being edited
        @return: an empty dict (closes the wizard)
        """
        order_ref = self.pool.get('pos.order')
        order_line_ref = self.pool.get('pos.order.line')
        if context is None:
            context = {}
        # Only the first wizard record matters; the wizard is opened per order.
        this = self.browse(cr, uid, ids[0], context=context)
        record_id = context and context.get('active_id', False)
        if isinstance(record_id, (int, long)):
            record_id = [record_id]
        for order in order_ref.browse(cr, uid, record_id, context=context):
            # Apply the same discount to every line of the order in one write.
            order_line_ref.write(cr, uid, [x.id for x in order.lines], {'discount':this.discount}, context=context)
        return {}
| gpl-3.0 |
ybonjour/nuus | services/indexing/Indexer.py | 1 | 3702 | __author__ = 'Yves Bonjour'
from Tokenizer import create_tokenizer
import redis
import uuid
def create_indexer(redis_host, redis_port):
    """Build an Indexer backed by a Redis store at the given host and port."""
    tokenizer = create_tokenizer()
    redis_db = redis.Redis(redis_host, redis_port)
    store = RedisIndexStore(redis_db)
    return Indexer(store, tokenizer)
class Indexer:
    """Facade that tokenizes documents and records term statistics in a store."""

    def __init__(self, store, tokenizer):
        self.store = store
        self.tokenizer = tokenizer

    def index(self, text, document_id):
        """Tokenize *text* and register every token under *document_id*."""
        for token in self.tokenizer.tokenize(text):
            self.store.add(document_id, token)

    def document_frequency_normalized(self, term):
        """Fraction of indexed documents that contain *term*."""
        return float(self.store.document_frequency(term)) / float(self.store.num_documents())

    def term_document_frequency(self, document, term):
        """Number of occurrences of *term* inside *document*."""
        return self.store.term_document_frequency(document, term)

    def get_posting_list(self, term):
        """Mapping of document id to occurrence count for *term*."""
        return self.store.posting_list(term)

    def get_terms(self, document):
        """All distinct terms recorded for *document*."""
        return self.store.get_terms(document)
class MemoryIndexStore(object):
    """In-memory index store: posting lists keyed by term, term sets keyed by document."""

    def __init__(self):
        self.posting_lists = {}   # term -> {document -> occurrence count}
        self.documents = {}       # document -> set of terms it contains

    def posting_list(self, term):
        """Return {document: count} for *term* ({} when the term is unknown)."""
        return self.posting_lists.get(term, {})

    def get_terms(self, document):
        """Return the terms of *document*.

        NOTE: mirrors the historical contract -- a *list* ([]) for an unknown
        document but a *set* for a known one.
        """
        if document in self.documents:
            return self.documents[document]
        return []

    def document_frequency(self, term):
        """Number of documents in which *term* occurs."""
        return len(self.posting_lists.get(term, {}))

    def num_documents(self):
        """Total number of indexed documents."""
        return len(self.documents)

    def term_document_frequency(self, document, term):
        """Occurrences of *term* in *document* (0 when unseen)."""
        return self.posting_lists.get(term, {}).get(document, 0)

    def add(self, document, term):
        """Record one occurrence of *term* in *document*."""
        counts = self.posting_lists.setdefault(term, {})
        counts[document] = counts.get(document, 0) + 1
        self.documents.setdefault(document, set()).add(term)
class RedisIndexStore(object):
    """Index store persisted in Redis.

    Key layout (all keys are plain strings):
      - ``documents``            set of all indexed document ids
      - ``document:<doc>``       set of terms occurring in <doc>
      - ``term:<term>``          set of document ids containing <term>
      - ``posting:<term>:<doc>`` occurrence count of <term> in <doc>
    """

    def __init__(self, redis):
        # Expects a redis-py style client (get/sadd/smembers/setnx/incr).
        self.redis = redis

    def posting_list(self, term):
        # Rebuild {document id: count} from the term's membership set plus
        # the per-(term, document) counters.
        # NOTE(review): assumes document ids were stored as UUID strings;
        # uuid.UUID() raises for anything else -- confirm with callers.
        return {uuid.UUID(document): int(self.redis.get(self._posting_key(term, document)))
                for document in self.redis.smembers(self._term_key(term))}

    def document_frequency(self, term):
        # Number of documents in which the term occurs.
        return len(self.redis.smembers(self._term_key(term)))

    def get_terms(self, document):
        # All terms recorded for the document (as returned by Redis).
        return self.redis.smembers(self._document_key(document))

    def num_documents(self):
        # Total number of indexed documents.
        return len(self.redis.smembers(self._documents_key()))

    def term_document_frequency(self, document, term):
        # Occurrence count of term in document; 0 when the posting is absent.
        tdf = self.redis.get(self._posting_key(term, document))
        return int(tdf) if tdf else 0

    def add(self, document, term):
        # Register the document globally, link document and term in both
        # directions, then bump the per-(term, document) counter.
        # (setnx seeds the counter with 0 only when it does not exist yet;
        # Redis INCR would also treat a missing key as 0.)
        self.redis.sadd(self._documents_key(), document)
        self.redis.sadd(self._term_key(term), document)
        self.redis.sadd(self._document_key(document), term)
        self.redis.setnx(self._posting_key(term, document), 0)
        self.redis.incr(self._posting_key(term, document))

    # --- key builders -------------------------------------------------

    def _documents_key(self):
        return "documents"

    def _document_key(self, document):
        return "document:{document}".format(document=document)

    def _term_key(self, term):
        return "term:{term}".format(term=term)

    def _posting_key(self, term, document):
        return "posting:{term}:{document}".format(term=term, document=document)
yati-sagade/pip | pip/_vendor/lockfile/pidlockfile.py | 488 | 6221 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import os
import sys
import errno
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
dirname = os.path.dirname(self.lock_file)
basename = os.path.split(self.path)[-1]
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file. If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.
    """
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        return None
    # ``with`` guarantees the descriptor is closed even if readline()
    # raises; the original leaked the handle on that path.
    with pidfile:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   Programs that read PID files should be somewhat flexible
        #   in what they accept; i.e., they should ignore extra
        #   whitespace, leading zeroes, absence of the trailing
        #   newline, or additional lines in the PID file.
        line = pidfile.readline().strip()
    try:
        return int(line)
    except ValueError:
        # Unparsable content is treated the same as an unreadable file.
        return None
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

    Get the numeric process ID ("PID") of the current process
    and write it to the named file as a line of text.

    :raises: OSError with ``errno.EEXIST`` if the file already exists
        (O_EXCL) -- PIDLockFile.acquire() relies on that for mutual
        exclusion.
    """
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character. For
    #   example, if crond was process number 25, /var/run/crond.pid
    #   would contain three characters: two, five, and newline.
    #
    # ``with`` closes the descriptor even if write() fails; the string is
    # built with a plain %-format instead of the original ``% vars()``.
    with os.fdopen(pidfile_fd, 'w') as pidfile:
        pidfile.write("%d\n" % os.getpid())
def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

    Removing a PID file that doesn't already exist puts us in the
    desired state, so we ignore the condition if the file does not
    exist.
    """
    try:
        os.remove(pidfile_path)
    except OSError as exc:
        # A missing file already satisfies the postcondition; anything
        # else (permissions, I/O error) is propagated.
        if exc.errno != errno.ENOENT:
            raise
| mit |
joomel1/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/gypsh.py | 151 | 1660 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]

generator_default_variables = {
}
# Map every identity variable to itself ('FOO' -> '<(FOO)') so expansions
# pass through this generator unchanged.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Drop into an interactive Python shell with the processed gyp data.

  The *params* argument is accepted for generator-interface compatibility
  but is unused.
  """
  # Named ``variables`` (not ``locals``) so the builtin locals() is not
  # shadowed.
  variables = {
    'target_list': target_list,
    'target_dicts': target_dicts,
    'data': data,
  }

  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
         (sys.version, sys.platform, repr(sorted(variables.keys())))

  code.interact(banner, local=variables)
| bsd-3-clause |
coderbone/SickRage | lib/unidecode/x0b7.py | 253 | 4833 | data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
| gpl-3.0 |
dleicht/PSB | pydrive/files.py | 4 | 9589 | import io
import mimetypes
from apiclient import errors
from apiclient.http import MediaIoBaseUpload
from functools import wraps
from .apiattr import ApiAttribute
from .apiattr import ApiAttributeMixin
from .apiattr import ApiResource
from .apiattr import ApiResourceList
from .auth import LoadAuth
class FileNotUploadedError(RuntimeError):
  """Error trying to access metadata of a file that has not been uploaded."""
class ApiRequestError(IOError):
  """Error raised when any Drive API request fails."""
class FileNotDownloadableError(RuntimeError):
  """Error trying to download a file that has no downloadable content."""
def LoadMetadata(decoratee):
  """Decorator that fetches the file's metadata on demand before the call.

  :raises: ApiRequestError, FileNotUploadedError
  """
  @wraps(decoratee)
  def _wrapper(self, *args, **kwargs):
    if self.uploaded:
      return decoratee(self, *args, **kwargs)
    # Not yet marked uploaded: pull the metadata first, then delegate.
    self.FetchMetadata()
    return decoratee(self, *args, **kwargs)
  return _wrapper
class GoogleDriveFileList(ApiResourceList):
  """Google Drive FileList instance.

  Equivalent to Files.list() in Drive APIs.
  """

  def __init__(self, auth=None, param=None):
    """Create an instance of GoogleDriveFileList.

    :param auth: authorized GoogleAuth instance.
    :param param: query parameters forwarded to Files.list() (e.g. 'q').
    """
    super(GoogleDriveFileList, self).__init__(auth=auth, metadata=param)

  @LoadAuth
  def _GetList(self):
    """Overwritten method which actually makes API call to list files.

    :returns: list -- list of pydrive.files.GoogleDriveFile.
    """
    # dict(self) carries the query parameters stored by ApiResourceList.
    self.metadata = self.auth.service.files().list(**dict(self)).execute()
    result = []
    for file_metadata in self.metadata['items']:
      # Files returned by the API are known to exist remotely, so mark
      # them uploaded and trust their metadata.
      tmp_file = GoogleDriveFile(
          auth=self.auth,
          metadata=file_metadata,
          uploaded=True)
      result.append(tmp_file)
    return result
class GoogleDriveFile(ApiAttributeMixin, ApiResource):
  """Google Drive File instance.

  Inherits ApiResource which inherits dict.
  Can access and modify metadata like dictionary.

  All ``except X, e`` clauses were modernized to ``except X as e``; the
  old form is Python-2-only syntax, the new form works on 2.6+ and 3.x.
  """
  content = ApiAttribute('content')
  uploaded = ApiAttribute('uploaded')
  metadata = ApiAttribute('metadata')

  def __init__(self, auth=None, metadata=None, uploaded=False):
    """Create an instance of GoogleDriveFile.

    :param auth: authorized GoogleAuth instance.
    :type auth: pydrive.auth.GoogleAuth
    :param metadata: file resource to initialize GoogleDriveFile with.
    :type metadata: dict.
    :param uploaded: True if this file is confirmed to be uploaded.
    :type uploaded: bool.
    """
    ApiAttributeMixin.__init__(self)
    ApiResource.__init__(self)
    self.metadata = {}
    self.dirty = {'content': False}
    self.auth = auth
    self.uploaded = uploaded
    if uploaded:
      self.UpdateMetadata(metadata)
    elif metadata:
      self.update(metadata)

  def __getitem__(self, key):
    """Overwrites manner of accessing Files resource.

    If this file instance is not uploaded and id is specified,
    it will try to look for metadata with Files.get().

    :param key: key of dictionary query.
    :type key: str.
    :returns: value of Files resource
    :raises: KeyError, FileNotUploadedError
    """
    try:
      return dict.__getitem__(self, key)
    except KeyError as e:
      if self.uploaded:
        raise KeyError(e)
      if self.get('id'):
        # We know the remote id: fetch metadata and retry the lookup.
        self.FetchMetadata()
        return dict.__getitem__(self, key)
      else:
        raise FileNotUploadedError()

  def SetContentString(self, content):
    """Set content of this file to be a string.

    Creates io.BytesIO instance of utf-8 encoded string.
    Sets mimeType to be 'text/plain' if not specified.

    :param content: content of the file in string.
    :type content: str.
    """
    self.content = io.BytesIO(content.encode('utf-8'))
    if self.get('mimeType') is None:
      self['mimeType'] = 'text/plain'

  def SetContentFile(self, filename):
    """Set content of this file from a file.

    Opens the file specified by this method.
    Will be read, uploaded, and closed by Upload() method.
    Sets metadata 'title' and 'mimeType' automatically if not specified.

    :param filename: name of the file to be uploaded.
    :type filename: str.
    """
    self.content = open(filename, 'rb')
    if self.get('title') is None:
      self['title'] = filename
    if self.get('mimeType') is None:
      self['mimeType'] = mimetypes.guess_type(filename)[0]

  def GetContentString(self):
    """Get content of this file as a string.

    :returns: str -- utf-8 decoded content of the file
    :raises: ApiRequestError, FileNotUploadedError, FileNotDownloadableError
    """
    if self.content is None or type(self.content) is not io.BytesIO:
      self.FetchContent()
    return self.content.getvalue().decode('utf-8')

  def GetContentFile(self, filename, mimetype=None):
    """Save content of this file as a local file.

    :param filename: name of the file to write to.
    :type filename: str.
    :param mimetype: preferred export mimetype for Google Docs formats.
    :raises: ApiRequestError, FileNotUploadedError, FileNotDownloadableError
    """
    if self.content is None or type(self.content) is not io.BytesIO:
      self.FetchContent(mimetype)
    f = open(filename, 'wb')
    f.write(self.content.getvalue())
    f.close()

  @LoadAuth
  def FetchMetadata(self):
    """Download file's metadata from id using Files.get().

    :raises: ApiRequestError, FileNotUploadedError
    """
    file_id = self.metadata.get('id') or self.get('id')
    if file_id:
      try:
        metadata = self.auth.service.files().get(fileId=file_id).execute()
      except errors.HttpError as error:
        raise ApiRequestError(error)
      else:
        self.uploaded = True
        self.UpdateMetadata(metadata)
    else:
      raise FileNotUploadedError()

  @LoadMetadata
  def FetchContent(self, mimetype=None):
    """Download file's content from download_url.

    :raises: ApiRequestError, FileNotUploadedError, FileNotDownloadableError
    """
    download_url = self.metadata.get('downloadUrl')
    if download_url:
      self.content = io.BytesIO(self._DownloadFromUrl(download_url))
      self.dirty['content'] = False
      return

    # Google Docs formats have no downloadUrl, only per-mimetype exports.
    export_links = self.metadata.get('exportLinks')
    if export_links and export_links.get(mimetype):
      self.content = io.BytesIO(
          self._DownloadFromUrl(export_links.get(mimetype)))
      self.dirty['content'] = False
      return

    raise FileNotDownloadableError(
        'No downloadLink/exportLinks for mimetype found in metadata')

  def Upload(self, param=None):
    """Upload/update file by choosing the most efficient method.

    :param param: additional parameter to upload file.
    :type param: dict.
    :raises: ApiRequestError
    """
    if self.uploaded or self.get('id') is not None:
      # Existing file: full update if the content changed, metadata-only
      # patch otherwise.
      if self.dirty['content']:
        self._FilesUpdate(param=param)
      else:
        self._FilesPatch(param=param)
    else:
      self._FilesInsert(param=param)

  @LoadAuth
  def _FilesInsert(self, param=None):
    """Upload a new file using Files.insert().

    :param param: additional parameter to upload file.
    :type param: dict.
    :raises: ApiRequestError
    """
    if param is None:
      param = {}
    param['body'] = self.GetChanges()
    try:
      if self.dirty['content']:
        param['media_body'] = self._BuildMediaBody()
      metadata = self.auth.service.files().insert(**param).execute()
    except errors.HttpError as error:
      raise ApiRequestError(error)
    else:
      self.uploaded = True
      self.dirty['content'] = False
      self.UpdateMetadata(metadata)

  @LoadAuth
  @LoadMetadata
  def _FilesUpdate(self, param=None):
    """Update metadata and/or content using Files.Update().

    :param param: additional parameter to upload file.
    :type param: dict.
    :raises: ApiRequestError, FileNotUploadedError
    """
    if param is None:
      param = {}
    param['body'] = self.GetChanges()
    param['fileId'] = self.metadata.get('id')
    try:
      if self.dirty['content']:
        param['media_body'] = self._BuildMediaBody()
      metadata = self.auth.service.files().update(**param).execute()
    except errors.HttpError as error:
      raise ApiRequestError(error)
    else:
      self.uploaded = True
      self.dirty['content'] = False
      self.UpdateMetadata(metadata)

  @LoadAuth
  @LoadMetadata
  def _FilesPatch(self, param=None):
    """Update metadata using Files.Patch().

    :param param: additional parameter to upload file.
    :type param: dict.
    :raises: ApiRequestError, FileNotUploadedError
    """
    if param is None:
      param = {}
    param['body'] = self.GetChanges()
    param['fileId'] = self.metadata.get('id')
    try:
      metadata = self.auth.service.files().patch(**param).execute()
    except errors.HttpError as error:
      raise ApiRequestError(error)
    else:
      self.UpdateMetadata(metadata)

  def _BuildMediaBody(self):
    """Build MediaIoBaseUpload to get prepared to upload content of the file.

    Sets mimeType as 'application/octet-stream' if not specified.

    :returns: MediaIoBaseUpload -- instance that will be used to upload content.
    """
    if self.get('mimeType') is None:
      self['mimeType'] = 'application/octet-stream'
    return MediaIoBaseUpload(self.content, self['mimeType'])

  @LoadAuth
  def _DownloadFromUrl(self, url):
    """Download file from url using provided credential.

    :param url: link of the file to download.
    :type url: str.
    :returns: str -- content of downloaded file in string.
    :raises: ApiRequestError
    """
    resp, content = self.auth.service._http.request(url)
    if resp.status != 200:
      raise ApiRequestError('Cannot download file: %s' % resp)
    return content
| mit |
kingvuplus/nn-gui | lib/python/Components/Scanner.py | 5 | 4791 | from Plugins.Plugin import PluginDescriptor
from Components.PluginComponent import plugins
from os import path as os_path, walk as os_walk, system
from mimetypes import guess_type, add_type
# Teach the stdlib mimetypes database about extensions it does not know,
# so getType() below can resolve them via guess_type().
add_type("application/x-debian-package", ".ipk")
add_type("application/ogg", ".ogg")
add_type("audio/x-flac", ".flac")
add_type("application/x-dream-package", ".dmpkg")
add_type("application/x-dream-image", ".nfi")
add_type("video/MP2T", ".ts")
add_type("video/x-dvd-iso", ".iso")
add_type("video/x-matroska", ".mkv")
add_type("audio/x-matroska", ".mka")
def getType(file):
	"""Return a MIME type for *file*, or None if it cannot be determined.

	Falls back to a couple of heuristics (DVD video_ts.ifo, VCD avseq*.dat)
	for names the mimetypes database does not know.
	"""
	mimetype, _ = guess_type(file)
	if mimetype is not None:
		return mimetype
	# Detect some unknown types
	lowered = file.lower()
	if lowered.endswith("video_ts.ifo"):
		return "video/x-dvd"
	dot = file.rfind('.')
	if dot == -1:
		return None
	if file[dot + 1:].lower() == "dat" and lowered[-11:-6] == "avseq":
		return "video/x-vcd"
	return None
class Scanner:
	"""Matches ScanFiles by mimetype and opens them via a callback.

	``mimetypes is None`` means "accept every mimetype".
	"""
	# NOTE(review): the mutable default arguments ([]) are shared across
	# calls; safe only as long as callers never mutate them in place.
	def __init__(self, name, mimetypes = [], paths_to_scan = [], description = "", openfnc = None):
		self.name = name
		self.description = description
		self.mimetypes = mimetypes
		self.paths_to_scan = paths_to_scan
		self.openfnc = openfnc
	def checkFile(self, file):
		# Hook for subclasses; accept everything by default.
		return True
	def handleFile(self, res, file):
		# Collect *file* under this scanner when both filters pass.
		matches = self.mimetypes is None or file.mimetype in self.mimetypes
		if matches and self.checkFile(file):
			res.setdefault(self, []).append(file)
	def __repr__(self):
		return "<Scanner %s>" % self.name
	def open(self, list, *args, **kwargs):
		if self.openfnc is None:
			return
		self.openfnc(list, *args, **kwargs)
class ScanPath:
	"""A path to scan, plus whether its subdirectories are included.

	Instances are collected in a set(), hence __hash__/__cmp__ below.
	"""
	def __init__(self, path, with_subdirs = False):
		self.path = path
		self.with_subdirs = with_subdirs
	def __repr__(self):
		return "%s(%s)" % (self.path, self.with_subdirs)
	# we will use this in a set(), so we need to implement __hash__ and __cmp__
	def __hash__(self):
		# Keep consistent with __cmp__: equal members give equal hashes.
		return self.path.__hash__() ^ self.with_subdirs.__hash__()
	def __cmp__(self, other):
		if self.path != other.path:
			return -1 if self.path < other.path else 1
		return self.with_subdirs.__cmp__(other.with_subdirs)
class ScanFile:
	"""A discovered file plus its (possibly autodetected) MIME type."""
	def __init__(self, path, mimetype = None, size = None, autodetect = True):
		self.path = path
		self.size = size
		if mimetype is None and autodetect:
			# No explicit type given: fall back to extension detection.
			self.mimetype = getType(path)
		else:
			self.mimetype = mimetype
	def __repr__(self):
		return "<ScanFile %s (%s, %s MB)>" % (self.path, self.mimetype, self.size)
def execute(option):
	# ChoiceBox callback: *option* is a (description, scanner, files, session)
	# tuple built by openList()/openFile(), or None when the user cancelled.
	print "execute", option
	if option is None:
		return
	(_, scanner, files, session) = option
	scanner.open(files, session)
def scanDevice(mountpoint):
scanner = [ ]
for p in plugins.getPlugins(PluginDescriptor.WHERE_FILESCAN):
l = p()
if not isinstance(l, list):
l = [l]
scanner += l
print "scanner:", scanner
res = { }
# merge all to-be-scanned paths, with priority to
# with_subdirs.
paths_to_scan = set()
# first merge them all...
for s in scanner:
paths_to_scan.update(set(s.paths_to_scan))
# ...then remove with_subdir=False when same path exists
# with with_subdirs=True
for p in paths_to_scan:
if p.with_subdirs == True and ScanPath(path=p.path) in paths_to_scan:
paths_to_scan.remove(ScanPath(path=p.path))
from Components.Harddisk import harddiskmanager
blockdev = mountpoint.rstrip("/").rsplit('/',1)[-1]
error, blacklisted, removable, is_cdrom, partitions, medium_found = harddiskmanager.getBlockDevInfo(blockdev)
# now scan the paths
for p in paths_to_scan:
path = os_path.join(mountpoint, p.path)
cmd = "ls " + path
system(cmd)
for root, dirs, files in os_walk(path):
for f in files:
path = os_path.join(root, f)
if is_cdrom and path.endswith(".wav") and path[-13:-6] == ("/track-"):
sfile = ScanFile(path,"audio/x-cda")
else:
sfile = ScanFile(path)
for s in scanner:
s.handleFile(res, sfile)
# if we really don't want to scan subdirs, stop here.
if not p.with_subdirs:
del dirs[:]
# res is a dict with scanner -> [ScanFiles]
return res
def openList(session, files):
	# Offer the given ScanFile(s) to every WHERE_FILESCAN plugin and open
	# the matching viewer; returns True when a viewer was found/started.
	if not isinstance(files, list):
		files = [ files ]
	# A plugin may return one Scanner or a list of Scanners.
	scanner = [ ]
	for p in plugins.getPlugins(PluginDescriptor.WHERE_FILESCAN):
		l = p()
		if not isinstance(l, list):
			l = [l]
		scanner += l
	print "scanner:", scanner
	res = { }
	for file in files:
		for s in scanner:
			s.handleFile(res, file)
	# One choice entry per scanner that accepted at least one file.
	choices = [ (r.description, r, res[r], session) for r in res ]
	Len = len(choices)
	if Len > 1:
		# Several viewers match: let the user pick; execute() opens it.
		from Screens.ChoiceBox import ChoiceBox
		session.openWithCallback(
			execute,
			ChoiceBox,
			title = "The following viewers were found...",
			list = choices
		)
		return True
	elif Len:
		execute(choices[0])
		return True
	return False
def openFile(session, mimetype, file):
	# Convenience wrapper: open a single path with an explicit mimetype.
	return openList(session, [ScanFile(file, mimetype)])
| gpl-2.0 |
MounirMesselmeni/django | django/conf/locale/id/formats.py | 504 | 2135 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = 'G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d-%m-%y', '%d/%m/%y', # '25-10-09', '25/10/09'
    '%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', '25/10/2009'
    '%d %b %Y', # '25 Oct 2006',
    '%d %B %Y', # '25 October 2006'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S', # '14.30.59'
    '%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
    '%d-%m-%Y %H.%M.%S.%f', # '25-10-2009 14.30.59.000200'
    '%d-%m-%Y %H.%M', # '25-10-2009 14.30'
    '%d-%m-%Y', # '25-10-2009'
    '%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
    '%d-%m-%y %H.%M.%S.%f', # '25-10-09 14.30.59.000200'
    '%d-%m-%y %H.%M', # '25-10-09 14.30'
    '%d-%m-%y', # '25-10-09'
    '%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
    '%m/%d/%y %H.%M.%S.%f', # '10/25/06 14.30.59.000200'
    '%m/%d/%y %H.%M', # '10/25/06 14.30'
    '%m/%d/%y', # '10/25/06'
    '%m/%d/%Y %H.%M.%S', # '10/25/2009 14.30.59'
    '%m/%d/%Y %H.%M.%S.%f', # '10/25/2009 14.30.59.000200'
    '%m/%d/%Y %H.%M', # '10/25/2009 14.30'
    '%m/%d/%Y', # '10/25/2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
ycaihua/nimbus | scripts/ios/pbxproj.py | 41 | 41211 | #!/usr/bin/env python
# encoding: utf-8
"""
pbxproj.py
Working with the pbxproj file format is a pain in the ass.
This script provides a couple basic features for parsing pbxproj files:
* Getting a dependency list
* Adding one pbxproj to another pbxproj as a dependency
Version 1.2.
History:
1.0 - October 20, 2010: Initial hacked-together version finished. It is alive!
1.1 - January 11, 2011: Add configuration settings to all configurations by default.
1.2 - June 7, 2011: Rewrote the pbxproj family of code as an ios module and made the class
more generic (no assumptions made about the project layout).
Branched from Three20's ttmodule script 2011-06-07.
Created by Jeff Verkoeyen on 2010-10-18.
Copyright 2011 Jeff Verkoeyen
Copyright 2009-2011 Facebook
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import logging
import os
import re
import sys
from relpath import relpath
pbxproj_cache = {}
class PbxprojTarget(object):
  def __init__(self, name, project, guid = None):
    # name: display name of the target as it appears in the .pbxproj.
    self._name = name
    # project: owning project object; must expose get_project_data() and
    # path() (see guid() and the dependency lookups below).
    self._project = project

    # This target's GUID.
    self._guid = guid

    # The configuration list GUID points to the list of configurations for a given target.
    self._configuration_list_guid = None

    # The list of configuration GUIDs for this target.
    self._configuration_guids = None

    # The GUID for the resources build phase.
    self._resources_build_phase_guid = None

    # The GUID for the frameworks builds phase.
    self._frameworks_build_phase_guid = None

    # An array of dependency GUIDs.
    self._dependency_guids = None

    # An array of dependency names in the same order as the dependency guids array.
    self._dependency_names = None

    # An array of path:target strings.
    self._dependency_paths = None

    # This target's product GUID.
    self._product_guid = None

    # This target's product name.
    self._product_name = None
  def name(self):
    """Return the target name passed at construction."""
    return self._name
def configuration_list_guid(self):
if self._configuration_list_guid is None:
project_data = project.get_project_data()
result = re.search('[A-Z0-9]+ \/\* '+re.escape(self._name)+' \*\/ = {\n[ \t]+isa = PBXNativeTarget;(?:.|\n)+?buildConfigurationList = ([A-Z0-9]+) \/\* Build configuration list for PBXNativeTarget "'+re.escape(self._name)+'" \*\/;',
project_data)
if result:
(self._configuration_list_guid, ) = result.groups()
else:
# False indicates that we could not find a configuration list GUID.
self._configuration_list_guid = False
return self._configuration_list_guid
def configuration_guids(self):
if not self._configuration_guids and self.configuration_list_guid():
project_data = project.get_project_data()
match = re.search(re.escape(self.configuration_list_guid())+' \/\* Build configuration list for PBXNativeTarget "'+re.escape(self._name)+'" \*\/ = \{\n[ \t]+isa = XCConfigurationList;\n[ \t]+buildConfigurations = \(\n((?:.|\n)+?)\);', project_data)
if not match:
logging.error("Couldn't find the configuration list for the project.")
return False
(configuration_list,) = match.groups()
self._configuration_guids = re.findall('[ \t]+([A-Z0-9]+) \/\* (.+) \*\/,\n', configuration_list)
return self._configuration_guids
def guid(self):
if not self._guid:
project_data = self._project.get_project_data()
result = re.search('([A-Z0-9]+) \/\* '+re.escape(self._name)+' \*\/ = {\n[ \t]+isa = PBXNativeTarget;(?:.|\n)+?buildPhases =',
project_data)
if not result:
logging.error("Can't recover: Unable to find the GUID for the target named \""+self._name+"\" from the project loaded from: "+self._project.path())
return False
(self._guid, ) = result.groups()
return self._guid
  def _gather_build_phases(self):
    # Locate the Resources and Frameworks build-phase GUIDs for this target
    # and cache them on self.  Returns False when the target or its required
    # Frameworks phase cannot be found.
    project_data = self._project.get_project_data()
    result = re.search('[A-Z0-9]+ \/\* '+re.escape(self._name)+' \*\/ = {\n[ \t]+isa = PBXNativeTarget;(?:.|\n)+?buildPhases = \(\n((?:.|\n)+?)\);',
                       project_data)

    if not result:
      logging.error("Can't recover: Unable to find the build phases for the target named \""+self._name+"\" from the project loaded from: "+self._project.path())
      return False

    (build_phases, ) = result.groups()

    # Get the build phases we care about.
    match = re.search('([A-Z0-9]+) \/\* Resources \*\/', build_phases)
    if match:
      (self._resources_build_phase_guid, ) = match.groups()
    else:
      # False (not None) marks "looked and found nothing" -- a Resources
      # phase is optional.
      self._resources_build_phase_guid = False

    # A Frameworks phase, by contrast, is required.
    match = re.search('([A-Z0-9]+) \/\* Frameworks \*\/', build_phases)
    if not match:
      logging.error("Couldn't find the Frameworks phase for the target named \""+self._name+"\" from the project loaded from: "+self._project.path())
      logging.error("Please add a New Link Binary With Libraries Build Phase to your target")
      logging.error("Right click your target in the project, then click Add, then New Build Phase,")
      logging.error("  \"New Link Binary With Libraries Build Phase\"")
      return False
    (self._frameworks_build_phase_guid, ) = match.groups()
  def resources_build_phase_guid(self):
    # NOTE(review): when the target has no Resources phase the cached value
    # is False, which is falsy, so this re-runs _gather_build_phases() on
    # every call -- confirm whether that re-scan is intended.
    if not self._resources_build_phase_guid:
      self._gather_build_phases()
    return self._resources_build_phase_guid
  def frameworks_build_phase_guid(self):
    # Lazily locate the Frameworks phase; _gather_build_phases() caches it.
    if not self._frameworks_build_phase_guid:
      self._gather_build_phases()
    return self._frameworks_build_phase_guid
  def dependency_guids(self):
    # Extract the PBXTargetDependency GUIDs from this target's
    # ``dependencies = (...)`` section; returns False when the section
    # cannot be found.  The result is cached.
    if not self._dependency_guids:
      project_data = self._project.get_project_data()
      result = re.search(re.escape(self.guid())+' \/\* '+re.escape(self._name)+' \*\/ = {\n[ \t]+isa = PBXNativeTarget;(?:.|\n)+?dependencies = \(\n((?:[ \t]+[A-Z0-9]+ \/\* PBXTargetDependency \*\/,\n)*)[ \t]*\);\n',
                         project_data)
      if not result:
        logging.error("Unable to get dependencies from: "+self._project.path())
        return False

      (dependency_set, ) = result.groups()
      self._dependency_guids = re.findall('[ \t]+([A-Z0-9]+) \/\* PBXTargetDependency \*\/,\n', dependency_set)
    return self._dependency_guids
def dependency_names(self):
if not self._dependency_names:
project_data = self._project.get_project_data()
dependency_names = []
for guid in self.dependency_guids():
result = re.search(guid+' \/\* PBXTargetDependency \*\/ = \{\n[ \t]+isa = PBXTargetDependency;\n[ \t]*name = (["a-zA-Z0-9\.\-]+);',
project_data)
if result:
(dependency_name, ) = result.groups()
dependency_names.append(dependency_name)
self._dependency_names = dependency_names
return self._dependency_names
  def dependency_paths(self):
    """Lazily resolve and cache "path:remoteGUID" strings for each of this
    target's project dependencies.

    The resolution walks three indirections in the pbxproj text:
    dependency GUID -> PBXTargetDependency.targetProxy ->
    PBXContainerItemProxy.containerPortal (+ remoteGlobalIDString) ->
    PBXFileReference.path of the dependent .xcodeproj.

    Returns the cached list, [] when the target has no dependencies, or
    False when any dependency cannot be fully resolved.
    """
    if self._dependency_paths:
      return self._dependency_paths
    dependency_guids = self.dependency_guids()
    if dependency_guids:
      project_data = self._project.get_project_data()
      # Pass 1: dependency GUID -> target proxy GUID.
      target_proxy_guids = []
      for guid in dependency_guids:
        result = re.search(guid+' \/\* PBXTargetDependency \*\/ = \{\n(?:.|\n)+?targetProxy = ([A-Z0-9]+) \/\* PBXContainerItemProxy \*\/;',
                           project_data)
        if result:
          (target_proxy_guid, ) = result.groups()
          target_proxy_guids.append(target_proxy_guid)
      # Pass 2: proxy GUID -> container portal GUID + remote global ID.
      container_portal_guids = []
      remote_guids = []
      for guid in target_proxy_guids:
        result = re.search(guid+' \/\* PBXContainerItemProxy \*\/ = \{\n(?:.|\n)+?containerPortal = ([A-Z0-9]+) \/\* .+? \*\/;(?:.|\n)+?remoteGlobalIDString = ([A-Z0-9]+)',
                           project_data)
        if result:
          (container_portal_guid, remote_guid, ) = result.groups()
          container_portal_guids.append(container_portal_guid)
          remote_guids.append(remote_guid)
      # Pass 3: portal GUID -> .xcodeproj file path; pair it with the remote
      # GUID collected in pass 2 (remote_guids is index-aligned).
      dependency_paths = []
      index = 0
      for guid in container_portal_guids:
        result = re.search(guid+' \/\* .+? \*\/ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = .+?; path = (.+?); sourceTree = .+?; };',
                           project_data)
        if result:
          (dependency_path, ) = result.groups()
          dependency_path = dependency_path + ":" + remote_guids[index]
          dependency_paths.append(dependency_path)
        else:
          logging.error("Unable to find the path for GUID: "+guid)
        index += 1
      # All-or-nothing: a partial resolution is treated as failure.
      if len(dependency_paths) != len(container_portal_guids):
        logging.error("Unable to load all dependency information from the project.")
        return False
      self._dependency_paths = dependency_paths
    elif dependency_guids is not None and dependency_guids is not False and len(dependency_guids) == 0:
      # Explicitly cache "no dependencies" so we don't re-parse every call.
      self._dependency_paths = []
    return self._dependency_paths
def _gather_product_details(self):
project_data = self._project.get_project_data()
result = re.search(re.escape(self.guid())+' \/\* '+re.escape(self._name)+' \*\/ = {\n[ \t]+isa = PBXNativeTarget;(?:.|\n)+?productReference = ([A-Z0-9]+) \/\* (.+?) \*\/;',
project_data)
if not result:
logging.error("Unable to get product guid from: "+self.path())
return False
(self._product_guid, self._product_name, ) = result.groups()
return True
def product_guid(self):
if not self._product_guid:
self._gather_product_details()
return self._product_guid
def product_name(self):
if not self._product_name:
self._gather_product_details()
return self._product_name
class pbxproj(object):

  @staticmethod
  def get_pbxproj_by_path(path, xcode_version = None):
    """Return the cached pbxproj for `path`, creating and caching it on the
    first request."""
    cached = pbxproj_cache.get(path)
    if cached is None:
      cached = pbxproj(path, xcode_version = xcode_version)
      pbxproj_cache[path] = cached
    return cached
  def __init__(self, path, xcode_version = None):
    """Load the project.pbxproj at `path` and index its targets.

    `xcode_version` is an optional Xcode version string (e.g. "4.2") used
    when deciding which header search paths to emit.
    """
    # The contents of the pbxproj file loaded into memory.
    self._project_data = None
    # Name of the target operations apply to; None until set_active_target().
    self._active_target = None
    # Project name, e.g. "Foo" for ".../Foo.xcodeproj/project.pbxproj".
    self._project_name = os.path.basename(os.path.dirname(path)).replace('.xcodeproj', '')
    # The path to the pbxproj file.
    self._path = path
    # Mapping of target names to PbxprojTarget objects.
    self._targets_by_name = {}
    # Mapping of target guids to PbxprojTarget objects.
    self._targets_by_guid = {}
    # Xcode version string supplied by the caller; may be None.
    self._xcode_version = xcode_version
    # The file format version (objectVersion) for this project; lazily parsed.
    self._file_format_version = None
    # False when the file could not be read/parsed. Must run last: it reads
    # the attributes initialized above.
    self._is_loaded = self._load_from_disk()
def __str__(self):
details = "\t path: \""+str(self._path)+"\"\n\ttargets:"
for target_name in self._targets_by_name:
target = self._targets_by_name[target_name]
details += "\n -> "+target.name() + " ("+target.guid()+")"
if self._active_target:
details += "\n active target: "+self._active_target
return details
  def is_loaded(self):
    """True when the pbxproj file was successfully read and parsed."""
    return self._is_loaded
def uniqueid_for_target(self, target):
return self._path + ':' + target
  def path(self):
    """Path of the project.pbxproj file this object was loaded from."""
    return self._path
def active_target(self):
if self._active_target is None:
return None
return self.target_by_name(self._active_target)
  def set_active_target(self, target_name):
    """Select the target (by name) that subsequent operations apply to."""
    self._active_target = target_name
# A pbxproj file is contained within an xcodeproj file.
# This method simply strips off the project.pbxproj part of the path.
def xcodeprojpath(self):
return os.path.dirname(self.path())
def version(self):
if not self._file_format_version:
result = re.search('\tobjectVersion = ([0-9]+);', project_data)
if not result:
logging.error("Can't recover: unable to find the project version for your target at: "+self.path())
return False
(self._file_format_version,) = result.groups()
self._file_format_version = int(self._file_format_version)
return self._file_format_version
  # Fetch a specific target by its name.
  def target_by_name(self, name):
    """Return the PbxprojTarget for `name`, creating and indexing one when
    it is not yet known.

    `name` may also be a GUID: if it matches a known target GUID, that
    target is returned (with its `_guid` re-assigned to the requested key).
    """
    if name in self._targets_by_guid:
      target = self._targets_by_guid[name]
      target._guid = name
      return target
    if name not in self._targets_by_name:
      # Unknown name: create a lazily-parsed target and index it both by
      # name and by GUID.
      target = PbxprojTarget(name, self)
      self._targets_by_name[name] = target
      self._targets_by_guid[target.guid()] = target
    return self._targets_by_name[name]
# Load the project data from disk.
def get_project_data(self):
if self._project_data is None:
if not os.path.exists(self.path()):
logging.info("Couldn't find the project at this path:")
logging.info(self.path())
return None
project_file = open(self.path(), 'r')
self._project_data = project_file.read()
return self._project_data
# Write the project data to disk.
def set_project_data(self, project_data, flush=False):
if self._project_data != project_data or flush:
self._project_data = project_data
if flush:
project_file = open(self.path(), 'w')
project_file.write(self._project_data)
def _load_from_disk(self):
project_data = self.get_project_data()
if project_data is None:
logging.error("Can't recover: unable to load the project data from disk, check the path:\n path: \""+self.path()+"\"")
return False
self._gather_all_targets()
return True
  def _gather_all_targets(self):
    """Parse the project's `targets = ( ... );` list and build the
    name->target and guid->target indexes.

    Returns True on success, None when the target list cannot be parsed.
    """
    project_data = self.get_project_data()
    # Capture the body of the top-level targets list.
    result = re.search('targets = \(\n((?:.|\n)+?)\);',
                       project_data)
    if not result:
      logging.error("Couldn't find any targets.")
      return None
    (target_list, ) = result.groups()
    # Each entry is "<GUID> /* <name> */".
    targets = re.findall('([A-Z0-9]+) \/\* (.+?) \*\/', target_list)
    if not targets:
      logging.error("Unable to read the targets.")
      return None
    for target in targets:
      name = target[1]
      target = PbxprojTarget(name, project = self, guid = target[0])
      self._targets_by_name[name] = target
      self._targets_by_guid[target.guid()] = target
    return True
def dependency_names_for_target_name(self, target_name):
target = self.target_by_name(target_name)
return target.dependency_names()
def dependency_paths_for_target_name(self, target_name):
target = self.target_by_name(target_name)
return target.dependency_paths()
  # Add a line to the PBXBuildFile section.
  #
  # <default_guid> /* <name> in Frameworks */ = {isa = PBXBuildFile; fileRef = <file_ref_hash> /* <name> */; };
  #
  # Returns: <default_guid> if a line was added.
  # Otherwise, the existing guid is returned.
  def add_buildfile(self, name, file_ref_hash, default_guid):
    """Ensure a PBXBuildFile entry referencing `file_ref_hash` exists.

    Returns the GUID of the (new or pre-existing) build file entry, or
    None when the PBXBuildFile section cannot be located.
    """
    project_data = self.get_project_data()
    # Grab the body of the PBXBuildFile section.
    match = re.search('\/\* Begin PBXBuildFile section \*\/\n((?:.|\n)+?)\/\* End PBXBuildFile section \*\/', project_data)
    if not match:
      logging.error("Couldn't find PBXBuildFile section.")
      return None
    (subtext, ) = match.groups()
    buildfile_hash = None
    # Reuse an existing entry that already references this file.
    match = re.search('([A-Z0-9]+).+?fileRef = '+re.escape(file_ref_hash), subtext)
    if match:
      (buildfile_hash, ) = match.groups()
      logging.info("This build file already exists: "+buildfile_hash)
    if buildfile_hash is None:
      # No existing entry: splice a new line in right after the section header.
      match = re.search('\/\* Begin PBXBuildFile section \*\/\n', project_data)
      buildfile_hash = default_guid
      libfiletext = "\t\t"+buildfile_hash+" /* "+name+" in Frameworks */ = {isa = PBXBuildFile; fileRef = "+file_ref_hash+" /* "+name+" */; };\n"
      project_data = project_data[:match.end()] + libfiletext + project_data[match.end():]
    self.set_project_data(project_data)
    return buildfile_hash
  # Add a line to the PBXFileReference section.
  #
  # <default_guid> /* <name> */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.<file_type>"; name = <name>; path = <rel_path>; sourceTree = <source_tree>; };
  #
  # Returns: <default_guid> if a line was added.
  # Otherwise, the existing guid is returned.
  def add_filereference(self, name, file_type, default_guid, rel_path, source_tree):
    """Ensure a PBXFileReference entry exists for `name` at `rel_path`.

    Returns the GUID of the (new or pre-existing) file reference, or
    False when the PBXFileReference section cannot be located.
    """
    project_data = self.get_project_data()
    quoted_rel_path = '"'+rel_path.strip('"')+'"'
    fileref_hash = None
    # Look for an existing reference with the unquoted path first...
    match = re.search('([A-Z0-9]+) \/\* '+re.escape(name)+' \*\/ = \{isa = PBXFileReference; lastKnownFileType = "wrapper.'+file_type+'"; name = '+re.escape(name)+'; path = '+re.escape(rel_path)+';', project_data)
    if not match:
      # Check again for quoted versions, just to be sure.
      match = re.search('([A-Z0-9]+) \/\* '+re.escape(name)+' \*\/ = \{isa = PBXFileReference; lastKnownFileType = "wrapper.'+file_type+'"; name = '+re.escape(name)+'; path = '+re.escape(quoted_rel_path)+';', project_data)
    if match:
      logging.info("This file has already been added.")
      (fileref_hash, ) = match.groups()
    else:
      # No existing entry: splice a new line in right after the section header.
      match = re.search('\/\* Begin PBXFileReference section \*\/\n', project_data)
      if not match:
        logging.error("Couldn't find the PBXFileReference section.")
        return False
      fileref_hash = default_guid
      pbxfileref = "\t\t"+fileref_hash+" /* "+name+" */ = {isa = PBXFileReference; lastKnownFileType = \"wrapper."+file_type+"\"; name = "+name+"; path = "+quoted_rel_path+"; sourceTree = "+source_tree+"; };\n"
      project_data = project_data[:match.end()] + pbxfileref + project_data[match.end():]
    self.set_project_data(project_data)
    return fileref_hash
  # Add a file to the given PBXGroup.
  #
  # <guid> /* <name> */,
  def add_file_to_group(self, name, guid, group):
    """Add `<guid> /* <name> */,` to the children of the PBXGroup named
    `group`, unless it is already a member.

    Returns True on success (including the already-present case), False
    when the group cannot be located.
    """
    project_data = self.get_project_data()
    # Capture the group's children list.
    match = re.search('\/\* '+re.escape(group)+' \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n((?:.|\n)+?)\);', project_data)
    if not match:
      logging.error("Couldn't find the "+group+" children.")
      return False
    (children,) = match.groups()
    match = re.search(re.escape(guid), children)
    if match:
      logging.info("This file is already a member of the "+name+" group.")
    else:
      # Splice the new child in right after the `children = (` line.
      match = re.search('\/\* '+re.escape(group)+' \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n', project_data)
      if not match:
        logging.error("Couldn't find the "+group+" group.")
        return False
      pbxgroup = "\t\t\t\t"+guid+" /* "+name+" */,\n"
      project_data = project_data[:match.end()] + pbxgroup + project_data[match.end():]
    self.set_project_data(project_data)
    return True
# Add a file to the Frameworks PBXGroup.
#
# <guid> /* <name> */,
def add_file_to_frameworks(self, name, guid):
return self.add_file_to_group(name, guid, 'Frameworks')
# Add a file to the Resources PBXGroup.
#
# <guid> /* <name> */,
def add_file_to_resources(self, name, guid):
match = re.search('\/\* '+re.escape('Resources')+' \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n((?:.|\n)+?)\);', self.get_project_data())
if not match:
return self.add_file_to_group(name, guid, 'Supporting Files')
return self.add_file_to_group(name, guid, 'Resources')
  def add_file_to_phase(self, name, guid, phase_guid, phase):
    """Add build file `guid` to the files list of the build phase with GUID
    `phase_guid` (`phase` is the human-readable phase name, e.g.
    "Frameworks" or "Resources").

    Returns True on success (including when the file is already listed),
    False when the phase or its files list cannot be located.
    """
    project_data = self.get_project_data()
    # Capture the phase's `files = ( ... );` body.
    match = re.search(re.escape(phase_guid)+" \/\* "+re.escape(phase)+" \*\/ = {(?:.|\n)+?files = \(((?:.|\n)+?)\);", project_data)
    if not match:
      logging.error("Couldn't find the "+phase+" phase.")
      return False
    (files, ) = match.groups()
    match = re.search(re.escape(guid), files)
    if match:
      logging.info("The file has already been added.")
    else:
      # Splice the new entry in right after the `files = (` line.
      match = re.search(re.escape(phase_guid)+" \/\* "+phase+" \*\/ = {(?:.|\n)+?files = \(\n", project_data)
      if not match:
        logging.error("Couldn't find the "+phase+" files")
        return False
      frameworktext = "\t\t\t\t"+guid+" /* "+name+" in "+phase+" */,\n"
      project_data = project_data[:match.end()] + frameworktext + project_data[match.end():]
    self.set_project_data(project_data)
    return True
def get_rel_path_to_products_dir(self):
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
build_path = os.path.join(os.path.join(os.path.dirname(Paths.src_dir), 'Build'), 'Products')
return relpath(project_path, build_path)
def add_file_to_frameworks_phase(self, name, guid):
target = self.active_target()
return self.add_file_to_phase(name, guid, target.frameworks_build_phase_guid(), 'Frameworks')
def add_file_to_resources_phase(self, name, guid):
if self._resources_guid is None:
logging.error("No resources build phase found in the destination project")
logging.error("Please add a New Copy Bundle Resources Build Phase to your target")
logging.error("Right click your target in the project, Add, New Build Phase,")
logging.error(" \"New Copy Bundle Resources Build Phase\"")
return False
return self.add_file_to_phase(name, guid, self._resources_guid, 'Resources')
def add_header_search_path(self, configuration):
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
build_path = os.path.join(os.path.join(os.path.join(os.path.dirname(Paths.src_dir), 'Build'), 'Products'), 'three20')
rel_path = relpath(project_path, build_path)
did_add_build_setting = self.add_build_setting(configuration, 'HEADER_SEARCH_PATHS', '"'+rel_path+'"')
if not did_add_build_setting:
return did_add_build_setting
# Version 46 is Xcode 4's file format.
try:
primary_version = int(self._xcode_version.split('.')[0])
except ValueError, e:
primary_version = 0
if self._file_format_version >= 46 or primary_version >= 4:
did_add_build_setting = self.add_build_setting(configuration, 'HEADER_SEARCH_PATHS', '"$(BUILT_PRODUCTS_DIR)/../../three20"')
if not did_add_build_setting:
return did_add_build_setting
did_add_build_setting = self.add_build_setting(configuration, 'HEADER_SEARCH_PATHS', '"$(BUILT_PRODUCTS_DIR)/../three20"')
if not did_add_build_setting:
return did_add_build_setting
return did_add_build_setting
  def add_build_setting(self, configuration, setting_name, value):
    """Add `value` to build setting `setting_name` of the named build
    configuration.

    Handles three states of the existing setting: absent (a new line is
    inserted), a single scalar value (converted into a two-element list),
    and an existing list (the value is prepended unless already present).
    Returns True on success, False when the configuration cannot be found.
    """
    project_data = self.get_project_data()
    # Capture the buildSettings body of the requested XCBuildConfiguration.
    match = re.search('\/\* '+configuration+' \*\/ = {\n[ \t]+isa = XCBuildConfiguration;\n[ \t]+buildSettings = \{\n((?:.|\n)+?)\};', project_data)
    if not match:
      logging.error("Couldn't find this configuration.")
      return False
    settings_start = match.start(1)
    settings_end = match.end(1)
    (build_settings, ) = match.groups()
    match = re.search(re.escape(setting_name)+' = ((?:.|\n)+?);', build_settings)
    if not match:
      # Add a brand new build setting. No checking for existing settings necessary.
      settingtext = '\t\t\t\t'+setting_name+' = '+value+';\n'
      project_data = project_data[:settings_start] + settingtext + project_data[settings_start:]
    else:
      # Build settings already exist. Is there one or many?
      (search_paths,) = match.groups()
      if re.search('\(\n', search_paths):
        # Many: the setting is already a ( ... ) list.
        match = re.search(re.escape(value), search_paths)
        if not match:
          # If value has any spaces in it, Xcode will split it up into
          # multiple entries.
          escaped_value = re.escape(value).replace(' ', '",\n[ \t]+"')
          match = re.search(escaped_value, search_paths)
        if not match and not re.search(re.escape(value.strip('"')), search_paths):
          # Not present in any form: prepend it to the list.
          match = re.search(re.escape(setting_name)+' = \(\n', build_settings)
          build_settings = build_settings[:match.end()] + '\t\t\t\t\t'+value+',\n' + build_settings[match.end():]
          project_data = project_data[:settings_start] + build_settings + project_data[settings_end:]
      else:
        # One: a single scalar value. Convert to a list containing both the
        # new value and the existing one (unless they are the same).
        if search_paths.strip('"') != value.strip('"'):
          existing_path = search_paths
          path_set = '(\n\t\t\t\t\t'+value+',\n\t\t\t\t\t'+existing_path+'\n\t\t\t\t)'
          build_settings = build_settings[:match.start(1)] + path_set + build_settings[match.end(1):]
          project_data = project_data[:settings_start] + build_settings + project_data[settings_end:]
    self.set_project_data(project_data)
    return True
def get_hash_base(self, uniquename):
examplehash = '320FFFEEEDDDCCCBBBAAA000'
uniquehash = hashlib.sha224(uniquename).hexdigest().upper()
uniquehash = uniquehash[:len(examplehash) - 4]
return '320'+uniquehash
def add_framework(self, framework):
tthash_base = self.get_hash_base(framework)
fileref_hash = self.add_filereference(framework, 'frameworks', tthash_base+'0', 'System/Library/Frameworks/'+framework, 'SDKROOT')
libfile_hash = self.add_buildfile(framework, fileref_hash, tthash_base+'1')
if not self.add_file_to_frameworks(framework, fileref_hash):
return False
if not self.add_file_to_frameworks_phase(framework, libfile_hash):
return False
return True
def add_bundle(self):
tthash_base = self.get_hash_base('Three20.bundle')
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
build_path = os.path.join(Paths.src_dir, 'Three20.bundle')
rel_path = relpath(project_path, build_path)
fileref_hash = self.add_filereference('Three20.bundle', 'plug-in', tthash_base+'0', rel_path, 'SOURCE_ROOT')
libfile_hash = self.add_buildfile('Three20.bundle', fileref_hash, tthash_base+'1')
if not self.add_file_to_resources('Three20.bundle', fileref_hash):
return False
if not self.add_file_to_resources_phase('Three20.bundle', libfile_hash):
return False
return True
# Get the PBXFileReference from the given PBXBuildFile guid.
def get_filerefguid_from_buildfileguid(self, buildfileguid):
project_data = self.get_project_data()
match = re.search(buildfileguid+' \/\* .+ \*\/ = {isa = PBXBuildFile; fileRef = ([A-Z0-9]+) \/\* .+ \*\/;', project_data)
if not match:
logging.error("Couldn't find PBXBuildFile row.")
return None
(filerefguid, ) = match.groups()
return filerefguid
def get_filepath_from_filerefguid(self, filerefguid):
project_data = self.get_project_data()
match = re.search(filerefguid+' \/\* .+ \*\/ = {isa = PBXFileReference; .+ path = (.+); .+ };', project_data)
if not match:
logging.error("Couldn't find PBXFileReference row.")
return None
(path, ) = match.groups()
return path
# Get all source files that are "built" in this project. This includes files built for
# libraries, executables, and unit testing.
def get_built_sources(self):
project_data = self.get_project_data()
match = re.search('\/\* Begin PBXSourcesBuildPhase section \*\/\n((?:.|\n)+?)\/\* End PBXSourcesBuildPhase section \*\/', project_data)
if not match:
logging.error("Couldn't find PBXSourcesBuildPhase section.")
return None
(buildphasedata, ) = match.groups()
buildfileguids = re.findall('[ \t]+([A-Z0-9]+) \/\* .+ \*\/,\n', buildphasedata)
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
filenames = []
for buildfileguid in buildfileguids:
filerefguid = self.get_filerefguid_from_buildfileguid(buildfileguid)
filepath = self.get_filepath_from_filerefguid(filerefguid)
filenames.append(os.path.join(project_path, filepath.strip('"')))
return filenames
# Get all header files that are "built" in this project. This includes files built for
# libraries, executables, and unit testing.
def get_built_headers(self):
project_data = self.get_project_data()
match = re.search('\/\* Begin PBXHeadersBuildPhase section \*\/\n((?:.|\n)+?)\/\* End PBXHeadersBuildPhase section \*\/', project_data)
if not match:
logging.error("Couldn't find PBXHeadersBuildPhase section.")
return None
(buildphasedata, ) = match.groups()
buildfileguids = re.findall('[ \t]+([A-Z0-9]+) \/\* .+ \*\/,\n', buildphasedata)
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
filenames = []
for buildfileguid in buildfileguids:
filerefguid = self.get_filerefguid_from_buildfileguid(buildfileguid)
filepath = self.get_filepath_from_filerefguid(filerefguid)
filenames.append(os.path.join(project_path, filepath.strip('"')))
return filenames
  def add_dependency(self, dep):
    """Wire `dep` (another pbxproj with an active target) into this project
    as a build dependency of this project's active target.

    Performs, in order: add a file reference to the dependent .xcodeproj
    (Step 1), register it in the Frameworks group (Step 2), create the
    PBXTargetDependency and its container proxies and attach them to the
    target's dependency list (Step 3.x), create the project reference and
    product group (Step 4.x), and finally link the dependency's product
    library into the Frameworks build phase (Steps 5-6).

    Each step re-reads and re-writes the project text via
    get_project_data()/set_project_data(), so the splices below depend on
    the exact byte offsets of the current text. Returns True on success,
    False on the first unrecoverable parse failure.
    """
    project_data = self.get_project_data()
    dep_data = dep.get_project_data()
    project_target = self.active_target()
    dep_target = dep.active_target()
    if project_data is None or dep_data is None:
      return False
    logging.info("\nAdding "+str(dep)+"\nto\n"+str(self))
    project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
    dep_path = os.path.abspath(dep.xcodeprojpath())
    rel_path = relpath(project_path, dep_path)
    logging.info("")
    logging.info("Project path: "+project_path)
    logging.info("Dependency path: "+dep_path)
    logging.info("Relative path: "+rel_path)
    # All GUIDs minted below share this deterministic base; the final digit
    # distinguishes the individual entries.
    tthash_base = self.get_hash_base(dep.uniqueid_for_target(dep._active_target))
    ###############################################
    logging.info("")
    logging.info("Step 1: Add file reference to the dependency...")
    pbxfileref_hash = self.add_filereference(dep._project_name+'.xcodeproj', 'pb-project', tthash_base+'0', rel_path, 'SOURCE_ROOT')
    project_data = self.get_project_data()
    logging.info("Done: Added file reference: "+pbxfileref_hash)
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 2: Add file to Frameworks group...")
    if not self.add_file_to_frameworks(dep._project_name+".xcodeproj", pbxfileref_hash):
      return False
    project_data = self.get_project_data()
    logging.info("Done: Added file to Frameworks group.")
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 3: Add dependencies...")
    pbxtargetdependency_hash = None
    pbxcontaineritemproxy_hash = None
    # Ensure a PBXTargetDependency section exists, and reuse an existing
    # dependency entry for this project name if one is already present.
    match = re.search('\/\* Begin PBXTargetDependency section \*\/\n((?:.|\n)+?)\/\* End PBXTargetDependency section \*\/', project_data)
    if not match:
      logging.info("\tAdding a PBXTargetDependency section...")
      match = re.search('\/\* End PBXSourcesBuildPhase section \*\/\n', project_data)
      if not match:
        logging.error("Couldn't find the PBXSourcesBuildPhase section.")
        return False
      project_data = project_data[:match.end()] + "\n/* Begin PBXTargetDependency section */\n\n/* End PBXTargetDependency section */\n" + project_data[match.end():]
    else:
      (subtext, ) = match.groups()
      match = re.search('([A-Z0-9]+) \/\* PBXTargetDependency \*\/ = {\n[ \t]+isa = PBXTargetDependency;\n[ \t]+name = '+re.escape(dep._project_name)+';\n[ \t]+targetProxy = ([A-Z0-9]+) \/\* PBXContainerItemProxy \*\/;', project_data)
      if match:
        (pbxtargetdependency_hash, pbxcontaineritemproxy_hash,) = match.groups()
        logging.info("This dependency already exists.")
    self.set_project_data(project_data)
    if pbxtargetdependency_hash is None or pbxcontaineritemproxy_hash is None:
      # No existing entry: mint GUIDs and splice a new PBXTargetDependency in.
      match = re.search('\/\* Begin PBXTargetDependency section \*\/\n', project_data)
      pbxtargetdependency_hash = tthash_base+'1'
      pbxcontaineritemproxy_hash = tthash_base+'2'
      pbxtargetdependency = "\t\t"+pbxtargetdependency_hash+" /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\tname = "+dep._project_name+";\n\t\t\ttargetProxy = "+pbxcontaineritemproxy_hash+" /* PBXContainerItemProxy */;\n\t\t};\n"
      project_data = project_data[:match.end()] + pbxtargetdependency + project_data[match.end():]
    logging.info("Done: Added dependency.")
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 3.1: Add container proxy for dependencies...")
    containerExists = False
    match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n((?:.|\n)+?)\/\* End PBXContainerItemProxy section \*\/', project_data)
    if not match:
      logging.info("\tAdding a PBXContainerItemProxy section...")
      match = re.search('\/\* End PBXBuildFile section \*\/\n', project_data)
      if not match:
        logging.error("Couldn't find the PBXBuildFile section.")
        return False
      project_data = project_data[:match.end()] + "\n/* Begin PBXContainerItemProxy section */\n\n/* End PBXContainerItemProxy section */\n" + project_data[match.end():]
    else:
      (subtext, ) = match.groups()
      match = re.search(re.escape(pbxcontaineritemproxy_hash), subtext)
      if match:
        logging.info("This container proxy already exists.")
        containerExists = True
    self.set_project_data(project_data)
    if not containerExists:
      # proxyType 1 = dependency proxy pointing at the dependent project.
      match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n', project_data)
      pbxcontaineritemproxy = "\t\t"+pbxcontaineritemproxy_hash+" /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = "+pbxfileref_hash+" /* "+dep._project_name+".xcodeproj */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = "+dep_target.guid()+";\n\t\t\tremoteInfo = "+dep._project_name+";\n\t\t};\n"
      project_data = project_data[:match.end()] + pbxcontaineritemproxy + project_data[match.end():]
    logging.info("Done: Added container proxy.")
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 3.2: Add module to the dependency list...")
    match = re.search(project_target.guid()+' \/\* .+? \*\/ = {\n[ \t]+(?:.|\n)+?[ \t]+dependencies = \(\n((?:.|\n)+?)\);', project_data)
    dependency_exists = False
    if not match:
      logging.error("Couldn't find the dependency list.")
      return False
    else:
      (dependencylist, ) = match.groups()
      match = re.search(re.escape(pbxtargetdependency_hash), dependencylist)
      if match:
        logging.info("This dependency has already been added.")
        dependency_exists = True
    if not dependency_exists:
      match = re.search(project_target.guid()+' \/\* .+? \*\/ = {\n[ \t]+(?:.|\n)+?[ \t]+dependencies = \(\n', project_data)
      if not match:
        logging.error("Couldn't find the dependency list.")
        return False
      dependency_item = '\t\t\t\t'+pbxtargetdependency_hash+' /* PBXTargetDependency */,\n'
      project_data = project_data[:match.end()] + dependency_item + project_data[match.end():]
    logging.info("Done: Added module to the dependency list.")
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 4: Create project references...")
    match = re.search('\/\* Begin PBXProject section \*\/\n((?:.|\n)+?)\/\* End PBXProject section \*\/', project_data)
    if not match:
      logging.error("Couldn't find the project section.")
      return False
    project_start = match.start(1)
    project_end = match.end(1)
    (project_section, ) = match.groups()
    reference_exists = False
    did_change = False
    productgroup_hash = None
    # Ensure a projectReferences list exists, then look for an existing
    # reference to our dependency's file reference.
    match = re.search('projectReferences = \(\n((?:.|\n)+?)\n[ \t]+\);', project_section)
    if not match:
      logging.info("Creating project references...")
      match = re.search('projectDirPath = ".*?";\n', project_section)
      if not match:
        logging.error("Couldn't find project references anchor.")
        return False
      did_change = True
      project_section = project_section[:match.end()] + '\t\t\tprojectReferences = (\n\t\t\t);\n' + project_section[match.end():]
    else:
      (refs, ) = match.groups()
      match = re.search('\{\n[ \t]+ProductGroup = ([A-Z0-9]+) \/\* Products \*\/;\n[ \t]+ProjectRef = '+re.escape(pbxfileref_hash), refs)
      if match:
        (productgroup_hash, ) = match.groups()
        logging.info("This product group already exists: "+productgroup_hash)
        reference_exists = True
    if not reference_exists:
      match = re.search('projectReferences = \(\n', project_section)
      if not match:
        logging.error("Missing the project references item.")
        return False
      productgroup_hash = tthash_base+'3'
      reference_text = '\t\t\t\t{\n\t\t\t\t\tProductGroup = '+productgroup_hash+' /* Products */;\n\t\t\t\t\tProjectRef = '+pbxfileref_hash+' /* '+dep._project_name+'.xcodeproj */;\n\t\t\t\t},\n'
      project_section = project_section[:match.end()] + reference_text + project_section[match.end():]
      did_change = True
    if did_change:
      project_data = project_data[:project_start] + project_section + project_data[project_end:]
    logging.info("Done: Created project reference.")
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 4.1: Create product group...")
    match = re.search('\/\* Begin PBXGroup section \*\/\n', project_data)
    if not match:
      logging.error("Couldn't find the group section.")
      return False
    group_start = match.end()
    lib_hash = None
    match = re.search(re.escape(productgroup_hash)+" \/\* Products \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n((?:.|\n)+?)\);", project_data)
    if match:
      logging.info("This product group already exists.")
      (children, ) = match.groups()
      match = re.search('([A-Z0-9]+) \/\* '+re.escape(dep_target.product_name())+' \*\/', children)
      if not match:
        logging.error("No product found")
        return False
        # TODO: Add this product.
      else:
        (lib_hash, ) = match.groups()
    else:
      lib_hash = tthash_base+'4'
      productgrouptext = "\t\t"+productgroup_hash+" /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t"+lib_hash+" /* "+dep_target.product_name()+" */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n"
      project_data = project_data[:group_start] + productgrouptext + project_data[group_start:]
    logging.info("Done: Created product group: "+lib_hash)
    self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 4.2: Add container proxy for target product...")
    containerExists = False
    targetproduct_hash = tthash_base+'6'
    match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n((?:.|\n)+?)\/\* End PBXContainerItemProxy section \*\/', project_data)
    if not match:
      logging.info("\tAdding a PBXContainerItemProxy section...")
      match = re.search('\/\* End PBXBuildFile section \*\/\n', project_data)
      if not match:
        logging.error("Couldn't find the PBXBuildFile section.")
        return False
      project_data = project_data[:match.end()] + "\n/* Begin PBXContainerItemProxy section */\n\n/* End PBXContainerItemProxy section */\n" + project_data[match.end():]
    else:
      (subtext, ) = match.groups()
      match = re.search(re.escape(targetproduct_hash), subtext)
      if match:
        logging.info("This container proxy already exists.")
        containerExists = True
    self.set_project_data(project_data)
    if not containerExists:
      # proxyType 2 = reference proxy pointing at the dependency's product.
      match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n', project_data)
      pbxcontaineritemproxy = "\t\t"+targetproduct_hash+" /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = "+pbxfileref_hash+" /* "+dep._project_name+".xcodeproj */;\n\t\t\tproxyType = 2;\n\t\t\tremoteGlobalIDString = "+dep_target.guid()+";\n\t\t\tremoteInfo = "+dep._project_name+";\n\t\t};\n"
      project_data = project_data[:match.end()] + pbxcontaineritemproxy + project_data[match.end():]
    logging.info("Done: Added target container proxy.")
    self.set_project_data(project_data)
    ###############################################
    # This code seems to break the xcode project but doesn't seem completely crucial.
    # Gr.
    # logging.info("")
    # logging.info("Step 4.3: Create reference proxy...")
    #
    # referenceExists = False
    #
    # match = re.search('\/\* Begin PBXReferenceProxy section \*\/\n((?:.|\n)+?)\/\* End PBXReferenceProxy section \*\/', project_data)
    # if not match:
    #   logging.info("\tAdding a PBXReferenceProxy section...")
    #   match = re.search('\/\* End PBXProject section \*\/\n', project_data)
    #
    #   if not match:
    #     logging.error("Couldn't find the PBXProject section.")
    #     return False
    #
    #   project_data = project_data[:match.end()] + "\n/* Begin PBXReferenceProxy section */\n\n/* End PBXReferenceProxy section */\n" + project_data[match.end():]
    # else:
    #   (subtext, ) = match.groups()
    #   match = re.search(re.escape(lib_hash), subtext)
    #   if match:
    #     logging.info("This reference proxy already exists.")
    #     referenceExists = True
    #
    # self.set_project_data(project_data)
    #
    # if not referenceExists:
    #   match = re.search('\/\* Begin PBXReferenceProxy section \*\/\n', project_data)
    #
    #   referenceproxytext = "\t\t"+lib_hash+" /* "+dep_target.product_name()+" */ = {\n\t\t\tisa = PBXReferenceProxy;\n\t\t\tfileType = archive.ar;\n\t\t\tpath = \""+dep_target.product_name()+"\";\n\t\t\tremoteRef = "+targetproduct_hash+" /* PBXContainerItemProxy */;\n\t\t\tsourceTree = BUILT_PRODUCTS_DIR;\n\t\t};\n"
    #   project_data = project_data[:match.end()] + referenceproxytext + project_data[match.end():]
    #
    #   logging.info("Done: Created reference proxy.")
    #   self.set_project_data(project_data)
    ###############################################
    logging.info("")
    logging.info("Step 5: Add target file...")
    libfile_hash = self.add_buildfile(dep_target.product_name(), lib_hash, tthash_base+'5')
    project_data = self.get_project_data()
    logging.info("Done: Added target file.")
    ###############################################
    logging.info("")
    logging.info("Step 6: Add frameworks...")
    self.add_file_to_frameworks_phase(dep_target.product_name(), libfile_hash)
    project_data = self.get_project_data()
    logging.info("Done: Adding module.")
    self.set_project_data(project_data, flush = True)
    return True
| apache-2.0 |
andyzsf/edx | lms/djangoapps/courseware/features/change_enrollment.py | 25 | 1827 | """ Provides lettuce acceptance methods for course enrollment changes """
from __future__ import absolute_import
from lettuce import world, step
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
import time
@step(u'the course "([^"]*)" has all enrollment modes$')
def add_enrollment_modes_to_course(_step, course):
    """ Add honor, audit, and verified modes to the sample course """
    # All three modes share the same course key, so build it once instead
    # of re-constructing it per factory call.
    course_key = SlashSeparatedCourseKey("edx", course, 'Test_Course')
    # (mode_slug, mode_display_name, min_price); None means "use the
    # factory's default price", matching the original per-mode calls.
    mode_specs = [
        ("verified", "Verified Course", 3),
        ("honor", "Honor Course", None),
        ("audit", "Audit Course", None),
    ]
    for mode_slug, mode_display_name, min_price in mode_specs:
        kwargs = dict(
            course_id=course_key,
            mode_slug=mode_slug,
            mode_display_name=mode_display_name,
        )
        if min_price is not None:
            kwargs['min_price'] = min_price
        world.CourseModeFactory.create(**kwargs)
@step(u'I click on Challenge Yourself$')
def challenge_yourself(_step):
    """ Simulates clicking 'Challenge Yourself' button on course """
    # Open the challenge prompt, then follow the upgrade link it reveals.
    world.browser.find_by_css('.wrapper-tip').click()
    world.browser.find_by_css('#upgrade-to-verified').click()
@step(u'I choose an honor code upgrade$')
def honor_code_upgrade(_step):
    """ Simulates choosing the honor code mode on the upgrade page """
    # Expand the honor-code section first; the checkbox is hidden until then.
    world.browser.find_by_css('.title-expand').click()
    # Wait for the expand animation before interacting with the checkbox.
    time.sleep(1)
    world.browser.find_by_css('#honor-code').click()
    world.browser.find_by_name("certificate_mode").click()
| agpl-3.0 |
cria/microSICol | import_db.py | 1 | 7329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Script to import XML data to current SICol database
# Obs: This script must be executed on root directory
# Author:Renato Arnellas Coelho renatoac at gmail dot com
import sys
import os
from xml.dom.minidom import Document,parse
def importSQLite(xml,sqlite_path='./db/sqlite.db'):
'''
xml = XML filename
sqlite_path = default is usually used
'''
from pysqlite2 import dbapi2 as sqlite
print "Connecting to SQLite database..."
if os.path.exists(sqlite_path):
#Connect
connect = sqlite.connect(sqlite_path,detect_types=sqlite.PARSE_COLNAMES,isolation_level=None)
cursor = connect.cursor()
print "Loading SQLite XML..."
doc = parse(xml)
tables = doc.getElementsByTagName('table')
for table in tables:
tablename = table.getAttribute('name')
print "Emptying table '%s'..." % tablename
rows = table.getElementsByTagName('row')
cursor.execute("DELETE FROM %s;" % tablename) #clear table first
print "Inserting values in table '%s'..." % tablename
### INSERT ITEM ###
for row in rows:
fields = row.getElementsByTagName('field')
colnames = []
colvalues = []
for field in fields:
colnames.append('`'+field.getAttribute('name')+'`')
coltype = field.getAttribute('type')
if coltype == 'integer':
colvalues.append(field.getAttribute('value'))
elif coltype == 'NULL':
colvalues.append("NULL")
else: #behaves as string
colvalues.append("'"+field.getAttribute('value').replace("'","\\'")+"'")
cursor.execute("INSERT INTO `%s` (%s) VALUES (%s);" % (tablename,",".join(colnames),",".join(colvalues) ) )
###################
#Close
cursor.close()
connect.close()
print "*** Import Finished ***"
raw_input()
else:
print "*** ERROR ***"
print "Unable to connect to SQLite database."
raw_input()
def importData(xml,host,user,pwd,dbname,port):
  '''Import XML table data into the SICol MySQL database.

  xml = XML filename
  host = MySQL host
  user = MySQL root user
  pwd = MySQL root password
  dbname = MySQL database to be used
  port = MySQL port number

  Writes an intermediate "import.sql" file in the working directory and
  feeds it to the mysql command-line client.
  '''
  import MySQLdb as mysql
  #Load file to Python XML object
  print "Loading XML..."
  doc = parse(xml)
  print "Generating intermediate SQL import file..."
  output = []
  #Connect to database
  output.append("USE %s;" % dbname)
  #Set Global VARS
  # Standard mysqldump-style preamble: save the session settings, then
  # relax charset/key checks so rows can be inserted in any order.
  output.append("/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;")
  output.append("/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;")
  output.append("/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;")
  output.append("/*!40101 SET NAMES utf8 */;")
  output.append("/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;")
  output.append("/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;")
  output.append("/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;")
  output.append("")
  #Insert data in each table disabling key constrains
  tables = doc.getElementsByTagName('table')
  for table in tables:
    tablename = table.getAttribute('name')
    print "Reading table '%s'..." % tablename
    rows = table.getElementsByTagName('row')
    output.append("/*!40000 ALTER TABLE `%s` DISABLE KEYS */;" % tablename)
    output.append("TRUNCATE TABLE `%s`;" % tablename) #clear table first
    ### INSERT ITEM ###
    for row in rows:
      fields = row.getElementsByTagName('field')
      colnames = []
      colvalues = []
      for field in fields:
        colnames.append('`'+field.getAttribute('name')+'`')
        coltype = field.getAttribute('type')
        if coltype == 'integer':
          colvalues.append(field.getAttribute('value'))
        elif coltype == 'NULL':
          colvalues.append("NULL")
        else: #behaves as string
          # Backslash-escape embedded quotes (MySQL string literal syntax).
          colvalues.append("'"+field.getAttribute('value').replace("'","\\'")+"'")
      output.append("INSERT INTO `%s`.`%s` (%s) VALUES (%s);" % (dbname,tablename,",".join(colnames),",".join(colvalues) ) )
    ###################
    output.append("/*!40000 ALTER TABLE `%s` ENABLE KEYS */;" % tablename)
  #Set Global VARS
  # Restore the session settings saved by the preamble above.
  output.append("")
  output.append("/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;")
  output.append("/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;")
  output.append("/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;")
  output.append("/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;")
  output.append("/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;")
  output.append("/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;")
  output.append("/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;")
  #Save SQL file
  open('import.sql','w').write("\n".join(output).encode('utf-8'))
  print "Running SQL import..."
  sicol_path = os.getcwd()+os.sep+'db'+os.sep+'scripts'+os.sep
  import platform
  # Locate the mysql client: on Windows scan PATH entries for "mysql",
  # on UNIX ask the shell via `which mysql`.
  if platform.system() == "Windows" or platform.system() == "Microsoft":
    mysql_path = [x for x in os.environ['PATH'].split(";") if x.lower().find('mysql') != -1]
  else: #UNIX
    pipe = os.popen("which mysql") #grab where MySQL is installed
    mysql_path = pipe.read().strip()
  if mysql_path == '' or mysql_path == []:
    print "*********** ERROR ***********"
    print "Please insert path to executable directory (mysql.exe) in OS 'PATH' variable."
    raw_input() #Wait for user input...
  else:
    if platform.system() == "Windows" or platform.system() == "Microsoft":
      #Ignore whether PATH ends with '\' or not
      mysql_path = mysql_path[0]
      if mysql_path[-1] != '\\': mysql_path += '\\'
      mysql_path = '"' + mysql_path + 'mysql.exe"'
    try:
      # NOTE(review): bd_version is never used afterwards; this try block
      # only validates the "sicol_v###" naming convention.
      bd_version = dbname.split("_")[1]
    except Exception,e:
      print "*********** ERROR ***********"
      print "Please type \"sicol_v###\" where ### = version number."
      raw_input() #Wait for user input...
      return
    try:
      # NOTE(review): passes the password on the command line, which is
      # visible to other local users via the process list.
      os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,os.getcwd()+os.sep+"import.sql") )
    except Exception,e:
      print "*********** ERROR ***********"
      print str(e)
      raw_input() #Wait for user input...
      return
  print "*** Import Finished ***"
  raw_input()
#If this script is called locally...
# Interactive entry point: prompts for MySQL connection details and/or an
# SQLite XML dump, then runs the corresponding import routine.
if __name__ == "__main__":
  print "*** Import SICol Database ***"
  opt = raw_input("Import MySQL data? (y/n)")[0].lower()
  if opt == 'y':
    import getpass
    import os.path
    host = raw_input("host=")
    port = raw_input("port=")
    root_login = raw_input("administrator login=")
    root_pwd = getpass.getpass("administrator password=")
    dbname = raw_input("database name=")
    xml = raw_input("import XML filename=")
    # Re-prompt until the file exists; an empty answer skips the import.
    while not os.path.exists(xml) and xml != '':
      print "*** ERROR ***"
      print "Specified file does not exist!"
      xml = raw_input("import XML filename=")
    if xml != '':
      importData(xml,host,root_login,root_pwd,dbname,port)
  opt = raw_input("Import SQLite data? (y/n)")[0].lower()
  if opt == 'y':
    xml = raw_input("import XML filename=")
    while not os.path.exists(xml) and xml != '':
      print "*** ERROR ***"
      print "Specified file does not exist!"
      xml = raw_input("import XML filename=")
    if xml != '':
      importSQLite(xml)
| gpl-2.0 |
grhawk/ASE | ase/calculators/elk.py | 2 | 14256 | import os
import numpy as np
from ase.units import Bohr, Hartree
from ase.io.elk import read_elk
from ase.calculators.calculator import FileIOCalculator, Parameters, kpts2mp, \
ReadError
# Conversion factors for keywords given in ASE units (eV-based): before
# writing elk.in, write_input divides the value by the factor listed here
# to obtain ELK's atomic units (Hartree).
elk_parameters = {
    'swidth': Hartree,
}
class ELK(FileIOCalculator):
    """ASE file-I/O calculator interface to the ELK FP-LAPW code.

    Writes an ``elk.in`` input file, runs the ``elk`` executable and
    parses the ``*.OUT`` files it produces for energies, forces,
    eigenvalues and related properties.
    """
    command = 'elk > elk.out'
    implemented_properties = ['energy', 'forces']

    def __init__(self, restart=None, ignore_bad_restart_file=False,
                 label=os.curdir, atoms=None, **kwargs):
        """Construct ELK calculator.

        The keyword arguments (kwargs) can be one of the ASE standard
        keywords: 'xc', 'kpts' and 'smearing' or any of ELK'
        native keywords.
        """
        FileIOCalculator.__init__(self, restart, ignore_bad_restart_file,
                                  label, atoms, **kwargs)

    def set_label(self, label):
        # label doubles as the working directory; INFO.OUT lives inside it.
        self.label = label
        self.directory = label
        self.prefix = ''
        self.out = os.path.join(label, 'INFO.OUT')

    def check_state(self, atoms):
        system_changes = FileIOCalculator.check_state(self, atoms)
        # Ignore boundary conditions (ELK always uses them):
        if 'pbc' in system_changes:
            system_changes.remove('pbc')
        return system_changes

    def set(self, **kwargs):
        changed_parameters = FileIOCalculator.set(self, **kwargs)
        if changed_parameters:
            self.reset()

    def write_input(self, atoms, properties=None, system_changes=None):
        """Write elk.in (and parameters.ase) for the current atoms object.

        Raises RuntimeError for mutually exclusive keyword combinations
        and when no species directory can be found.
        """
        FileIOCalculator.write_input(self, atoms, properties, system_changes)
        self.initialize(atoms)
        self.parameters.write(os.path.join(self.directory, 'parameters.ase'))
        # Reject combinations of ASE convenience keywords with the
        # equivalent native ELK keywords.
        if 'xctype' in self.parameters:
            if 'xc' in self.parameters:
                raise RuntimeError("You can't use both 'xctype' and 'xc'!")
        if self.parameters.get('autokpt'):
            if 'kpts' in self.parameters:
                raise RuntimeError("You can't use both 'autokpt' and 'kpts'!")
            if 'ngridk' in self.parameters:
                raise RuntimeError("You can't use both 'autokpt' and 'ngridk'!")
        if 'ngridk' in self.parameters:
            if 'kpts' in self.parameters:
                raise RuntimeError("You can't use both 'ngridk' and 'kpts'!")
        if self.parameters.get('autoswidth'):
            if 'smearing' in self.parameters:
                raise RuntimeError("You can't use both 'autoswidth' and 'smearing'!")
            if 'swidth' in self.parameters:
                raise RuntimeError("You can't use both 'autoswidth' and 'swidth'!")
        fd = open(os.path.join(self.directory, 'elk.in'), 'w')
        inp = {}
        inp.update(self.parameters)
        if 'xc' in self.parameters:
            # Map ASE xc names to ELK's integer xctype codes.
            xctype = {'LDA': 3,  # PW92
                      'PBE': 20,
                      'REVPBE': 21,
                      'PBESOL': 22,
                      'WC06': 26,
                      'AM05': 30}[self.parameters.xc]
            inp['xctype'] = xctype
            del inp['xc']
        if 'kpts' in self.parameters:
            mp = kpts2mp(atoms, self.parameters.kpts)
            inp['ngridk'] = tuple(mp)
            vkloff = []  # is this below correct?
            for nk in mp:
                if nk % 2 == 0:  # shift kpoint away from gamma point
                    vkloff.append(0.5)
                else:
                    vkloff.append(0)
            inp['vkloff'] = vkloff
            del inp['kpts']
        if 'smearing' in self.parameters:
            name = self.parameters.smearing[0].lower()
            if name == 'methfessel-paxton':
                stype = self.parameters.smearing[2]
            else:
                stype = {'gaussian': 0,
                         'fermi-dirac': 3,
                         }[name]
            inp['stype'] = stype
            inp['swidth'] = self.parameters.smearing[1]
            del inp['smearing']
        # convert keys to ELK units
        for key, value in inp.items():
            if key in elk_parameters:
                inp[key] /= elk_parameters[key]
        # write all keys
        for key, value in inp.items():
            fd.write('%s\n' % key)
            if isinstance(value, bool):
                fd.write('.%s.\n\n' % ('false', 'true')[value])
            elif isinstance(value, (int, float)):
                fd.write('%s\n\n' % value)
            else:
                fd.write('%s\n\n' % ' '.join([str(x) for x in value]))
        # cell
        fd.write('avec\n')
        for vec in atoms.cell:
            fd.write('%.14f %.14f %.14f\n' % tuple(vec / Bohr))
        fd.write('\n')
        # atoms: group atom indices (and initial magnetic moments) by species
        species = {}
        symbols = []
        for a, (symbol, m) in enumerate(
            zip(atoms.get_chemical_symbols(),
                atoms.get_initial_magnetic_moments())):
            if symbol in species:
                species[symbol].append((a, m))
            else:
                species[symbol] = [(a, m)]
                symbols.append(symbol)
        fd.write('atoms\n%d\n' % len(species))
        #scaled = atoms.get_scaled_positions(wrap=False)
        scaled = np.linalg.solve(atoms.cell.T, atoms.positions.T).T
        for symbol in symbols:
            fd.write("'%s.in' : spfname\n" % symbol)
            fd.write('%d\n' % len(species[symbol]))
            for a, m in species[symbol]:
                fd.write('%.14f %.14f %.14f 0.0 0.0 %.14f\n' %
                         (tuple(scaled[a]) + (m,)))
        # species
        species_path = self.parameters.get('species_dir')
        if species_path is None:
            species_path = os.environ.get('ELK_SPECIES_PATH')
        if species_path is None:
            raise RuntimeError(
                'Missing species directory! Use species_dir ' +
                'parameter or set $ELK_SPECIES_PATH environment variable.')
        # if sppath is present in elk.in it overwrites species blocks!
        fd.write("sppath\n'%s'\n\n" % species_path)
        # Close explicitly so elk.in is flushed before the code runs
        # (previously relied on the file object being garbage-collected).
        fd.close()

    def read(self, label):
        """Restore calculator state from a finished calculation in *label*."""
        FileIOCalculator.read(self, label)
        totenergy = os.path.join(self.directory, 'TOTENERGY.OUT')
        eigval = os.path.join(self.directory, 'EIGVAL.OUT')
        kpoints = os.path.join(self.directory, 'KPOINTS.OUT')
        for filename in [totenergy, eigval, kpoints, self.out]:
            if not os.path.isfile(filename):
                raise ReadError
        # read state from elk.in because *.OUT do not provide enough digits!
        self.atoms = read_elk(os.path.join(self.directory, 'elk.in'))
        self.parameters = Parameters.read(os.path.join(self.directory,
                                                       'parameters.ase'))
        self.initialize(self.atoms)
        self.read_results()

    def read_results(self):
        converged = self.read_convergence()
        if not converged:
            raise RuntimeError('ELK did not converge! Check ' + self.out)
        self.read_energy()
        if self.parameters.get('tforce'):
            self.read_forces()
        self.nbands = self.read_number_of_bands()
        self.nelect = self.read_number_of_electrons()
        self.niter = self.read_number_of_iterations()
        self.magnetic_moment = self.read_magnetic_moment()

    def initialize(self, atoms):
        if 'spinpol' not in self.parameters:  # honor elk.in settings
            self.spinpol = atoms.get_initial_magnetic_moments().any()
        else:
            self.spinpol = self.parameters['spinpol']

    def get_forces(self, atoms):
        # Forces are only available when the 'tforce' keyword was set.
        if not self.parameters.get('tforce'):
            raise NotImplementedError
        return FileIOCalculator.get_forces(self, atoms)

    def read_energy(self):
        fd = open(os.path.join(self.directory, 'TOTENERGY.OUT'), 'r')
        e = float(fd.readlines()[-1]) * Hartree
        self.results['free_energy'] = e
        self.results['energy'] = e

    def read_forces(self):
        """Parse 'total force' lines from INFO.OUT into self.results."""
        lines = open(self.out, 'r').readlines()
        forces = []
        for line in lines:
            if line.rfind('total force') > -1:
                forces.append(np.array([float(f)
                                        for f in line.split(':')[1].split()]))
        # BUG FIX: removed a dead np.zeros pre-allocation that was
        # immediately overwritten, and an unused counter incremented with
        # the '=+ 1' typo (assignment of +1, not increment).
        self.results['forces'] = np.array(forces) * Hartree / Bohr

    def read_convergence(self):
        converged = False
        text = open(self.out).read().lower()
        if ('convergence targets achieved' in text and
            'reached self-consistent loops maximum' not in text):
            converged = True
        return converged

    # more methods
    def get_number_of_bands(self):
        return self.nbands

    def get_number_of_electrons(self):
        return self.nelect

    def get_number_of_iterations(self):
        return self.niter

    def get_number_of_spins(self):
        return 1 + int(self.spinpol)

    def get_magnetic_moment(self, atoms):
        return self.magnetic_moment

    def get_magnetic_moments(self, atoms):
        # not implemented yet, so
        # so set the total magnetic moment on the atom no. 0 and fill with 0.0
        magmoms = [0.0 for a in range(len(atoms))]
        magmoms[0] = self.get_magnetic_moment(atoms)
        return np.array(magmoms)

    def get_spin_polarized(self):
        return self.spinpol

    def get_eigenvalues(self, kpt=0, spin=0):
        return self.read_eigenvalues(kpt, spin, 'eigenvalues')

    def get_occupation_numbers(self, kpt=0, spin=0):
        return self.read_eigenvalues(kpt, spin, 'occupations')

    def get_ibz_k_points(self):
        return self.read_kpts(mode='ibz_k_points')

    def get_fermi_level(self):
        return self.read_fermi()

    def read_kpts(self, mode='ibz_k_points'):
        """ Returns list of kpts weights or kpts coordinates. """
        assert mode in ['ibz_k_points' , 'k_point_weights'], 'mode not in [\'ibz_k_points\' , \'k_point_weights\']'
        kpoints = os.path.join(self.directory, 'KPOINTS.OUT')
        lines = open(kpoints).readlines()
        kpts = None
        for line in lines:
            if line.rfind(': nkpt') > -1:
                kpts = int(line.split(':')[0].strip())
                break
        assert kpts is not None
        text = lines[1:]  # remove first line
        values = []
        for line in text:
            if mode == 'ibz_k_points':
                b = [float(c.strip()) for c in line.split()[1:-3]]
            else:
                # BUG FIX: the original iterated over the *characters* of
                # the second-to-last column string; take the column's
                # float value instead.
                b = [float(line.split()[-2])]
            values.append(b)
        if len(values) == 0:
            values = None
        return np.array(values)

    def read_number_of_bands(self):
        nbands = None
        eigval = os.path.join(self.directory, 'EIGVAL.OUT')
        lines = open(eigval).readlines()
        for line in lines:
            if line.rfind(': nstsv') > -1:
                nbands = int(line.split(':')[0].strip())
                break
        if self.get_spin_polarized():
            # spin-up and spin-down states are counted together in nstsv;
            # integer division (same result as '/' under Python 2).
            nbands = nbands // 2
        return nbands

    def read_number_of_electrons(self):
        nelec = None
        text = open(self.out).read().lower()
        # Total electronic charge
        for line in iter(text.split('\n')):
            if line.rfind('total electronic charge :') > -1:
                nelec = float(line.split(':')[1].strip())
                break
        return nelec

    def read_number_of_iterations(self):
        niter = None
        lines = open(self.out).readlines()
        for line in lines:
            if line.rfind(' Loop number : ') > -1:
                niter = int(line.split(':')[1].split()[0].strip())  # last iter
        return niter

    def read_magnetic_moment(self):
        magmom = None
        lines = open(self.out).readlines()
        for line in lines:
            if line.rfind('total moment :') > -1:
                magmom = float(line.split(':')[1].strip())  # last iter
        return magmom

    def read_electronic_temperature(self):
        swidth = None
        text = open(self.out).read().lower()
        for line in iter(text.split('\n')):
            if line.rfind('smearing width :') > -1:
                swidth = float(line.split(':')[1].strip())
                break
        return Hartree * swidth

    def read_eigenvalues(self, kpt=0, spin=0, mode='eigenvalues'):
        """ Returns list of last eigenvalues, occupations
        for given kpt and spin. """
        assert mode in ['eigenvalues' , 'occupations'], 'mode not in [\'eigenvalues\' , \'occupations\']'
        eigval = os.path.join(self.directory, 'EIGVAL.OUT')
        lines = open(eigval).readlines()
        nstsv = None
        for line in lines:
            if line.rfind(': nstsv') > -1:
                nstsv = int(line.split(':')[0].strip())
                break
        assert nstsv is not None
        kpts = None
        for line in lines:
            if line.rfind(': nkpt') > -1:
                kpts = int(line.split(':')[0].strip())
                break
        assert kpts is not None
        text = lines[3:]  # remove first 3 lines
        # find the requested k-point
        beg = 2 + (nstsv + 4) * kpt
        end = beg + nstsv
        if self.get_spin_polarized():
            # elk prints spin-up and spin-down together; use integer
            # division so the indices stay ints on Python 3 as well.
            if spin == 0:
                beg = beg
                end = beg + nstsv // 2
            else:
                beg = beg - nstsv // 2 - 3
                end = end
        values = []
        for line in text[beg:end]:
            b = [float(c.strip()) for c in line.split()[1:]]
            values.append(b)
        if mode == 'eigenvalues':
            values = [Hartree * v[0] for v in values]
        else:
            values = [v[1] for v in values]
        if len(values) == 0:
            values = None
        return np.array(values)

    def read_fermi(self):
        """Method that reads Fermi energy in Hartree from the output file
        and returns it in eV"""
        E_f = None
        text = open(self.out).read().lower()
        for line in iter(text.split('\n')):
            if line.rfind('fermi :') > -1:
                E_f = float(line.split(':')[1].strip())
        E_f = E_f * Hartree
        return E_f
| gpl-2.0 |
Bobjoy/iOSBlogCN | Export.py | 65 | 1482 | __author__ = 'wwxiang'
#coding=utf-8
import os
import re
work = os.getcwd()
resxml = work + os.path.sep + 'blogcn.opml'
workmd = work + os.path.sep + 'README.md'
def handler():
    """Parse RSS entries from README.md and write them out as an OPML file.

    Each line of the form "[title](htmlUrl) <xmlUrl>" becomes one
    <outline> element in blogcn.opml.
    """
    isblock = True
    handlerData = []
    lineNo = 0
    try:
        # First pass just counts the lines so the read loop below knows
        # when it has reached the last line.
        with open(workmd,'rb') as linefs:
            lineCout = len(linefs.readlines())
            linefs.close()
        with open(workmd,'rb') as fs:
            while isblock:
                lineNo += 1
                val = fs.readline().decode()
                if lineNo == lineCout:
                    # Last line: stop after processing this iteration.
                    isblock = False
                if not val[0] == '[':
                    # Only lines starting with '[' carry feed entries.
                    continue
                title = re.findall(r'\[(.+?)\]',val)[0]
                xmlUrl = re.findall(r'<(.+?)>',val)[0]
                htmlUrl = re.findall(r'\((.+?)\)',val)[0]
                handlerData.append('<outline text="{0}" title="{0}" type="rss" xmlUrl="{1}" htmlUrl="{2}"/>'.format(title,xmlUrl,htmlUrl))
            fs.close()
    except:
        # NOTE(review): bare except also hides IndexError from malformed
        # lines, not only I/O failures — consider narrowing.
        print('错误处理','读取文件失败')
        return
    # Assemble the OPML document around the collected outline entries.
    export_xml = '<?xml version="1.0" encoding="UTF-8"?><opml version="1.0"><head><title>导出订阅</title></head><body><outline text="ios" title="ios" >\n'
    export_xml += '\r\n'.join(handlerData)
    export_xml += '</outline></body></opml>\r\n'
    with open(resxml,'wb') as fs:
        fs.write(export_xml.encode())
        fs.close()
    print('res.xml文件处理完成')
    pass
# Regenerate the OPML export whenever the README source file is present.
if os.path.isfile(workmd):
    handler()
| gpl-2.0 |
Sazzadmasud/Keystone_hash_token | keystone/common/sql/core.py | 5 | 13694 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQL backends for the various services.
Before using this module, call initialize(). This has to be done before
CONF() because it sets up configuration options.
"""
import contextlib
import functools
from oslo.config import cfg
import six
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute
from sqlalchemy import types as sql_types
from keystone.common import utils
from keystone import exception
from keystone.openstack.common.db import exception as db_exception
from keystone.openstack.common.db import options as db_options
from keystone.openstack.common.db.sqlalchemy import models
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import jsonutils
# Module-level handles shared by all keystone SQL backends.
CONF = cfg.CONF
ModelBase = declarative.declarative_base()
# For exporting to other modules: backends import these SQLAlchemy names
# from keystone.common.sql.core instead of from sqlalchemy directly.
Column = sql.Column
Index = sql.Index
String = sql.String
Integer = sql.Integer
Enum = sql.Enum
ForeignKey = sql.ForeignKey
DateTime = sql.DateTime
IntegrityError = sql.exc.IntegrityError
DBDuplicateEntry = db_exception.DBDuplicateEntry
OperationalError = sql.exc.OperationalError
NotFound = sql.orm.exc.NoResultFound
Boolean = sql.Boolean
Text = sql.Text
UniqueConstraint = sql.UniqueConstraint
PrimaryKeyConstraint = sql.PrimaryKeyConstraint
joinedload = sql.orm.joinedload
# Suppress flake8's unused import warning for flag_modified:
flag_modified = flag_modified
def initialize():
    """Initialize the module."""
    # Register oslo.db options with keystone's defaults; per the module
    # docstring this must run before CONF() parses the configuration.
    db_options.set_defaults(
        sql_connection="sqlite:///keystone.db",
        sqlite_db="keystone.db")
def initialize_decorator(init):
    """Ensure that the length of string field do not exceed the limit.

    This decorator check the initialize arguments, to make sure the
    length of string field do not exceed the length limit, or raise a
    'StringLengthExceeded' exception.

    Use decorator instead of inheritance, because the metaclass will
    check the __tablename__, primary key columns, etc. at the class
    definition.
    """
    def initialize(self, *args, **kwargs):
        cls = type(self)
        for k, v in kwargs.items():
            if hasattr(cls, k):
                attr = getattr(cls, k)
                # Only mapped columns carry length metadata.
                if isinstance(attr, InstrumentedAttribute):
                    column = attr.property.columns[0]
                    if isinstance(column.type, String):
                        # Coerce to text first so len() measures characters,
                        # matching what the database column will store.
                        if not isinstance(v, six.text_type):
                            v = six.text_type(v)
                        if column.type.length and \
                                column.type.length < len(v):
                            raise exception.StringLengthExceeded(
                                string=v, type=k, length=column.type.length)
        init(self, *args, **kwargs)
    return initialize
# Patch the declarative base so every model constructor enforces the
# string-column length checks above.
ModelBase.__init__ = initialize_decorator(ModelBase.__init__)
# Special Fields
class JsonBlob(sql_types.TypeDecorator):
    """Column type storing JSON-serializable data in a TEXT column."""
    impl = sql.Text
    def process_bind_param(self, value, dialect):
        # Serialize on the way into the database.
        return jsonutils.dumps(value)
    def process_result_value(self, value, dialect):
        # Deserialize on the way out.
        return jsonutils.loads(value)
class DictBase(models.ModelBase):
    # Subclasses list their first-class column names here; anything else
    # passed to from_dict lands in the 'extra' JSON blob.
    attributes = []
    @classmethod
    def from_dict(cls, d):
        """Build a model instance from *d*, folding unknown keys into 'extra'."""
        new_d = d.copy()
        new_d['extra'] = dict((k, new_d.pop(k)) for k in six.iterkeys(d)
                              if k not in cls.attributes and k != 'extra')
        return cls(**new_d)
    def to_dict(self, include_extra_dict=False):
        """Returns the model's attributes as a dictionary.

        If include_extra_dict is True, 'extra' attributes are literally
        included in the resulting dictionary twice, for backwards-compatibility
        with a broken implementation.
        """
        d = self.extra.copy()
        for attr in self.__class__.attributes:
            d[attr] = getattr(self, attr)
        if include_extra_dict:
            d['extra'] = self.extra.copy()
        return d
    def __getitem__(self, key):
        # Dict-style access: 'extra' keys win over model attributes.
        if key in self.extra:
            return self.extra[key]
        return getattr(self, key)
class ModelDictMixin(object):
    """Mixin converting between model instances and plain dictionaries."""

    @classmethod
    def from_dict(cls, d):
        """Build a model instance whose attributes come from dict *d*."""
        return cls(**d)

    def to_dict(self):
        """Return a dict mapping each mapped column name to its value."""
        result = {}
        for column in self.__table__.columns:
            result[column.name] = getattr(self, column.name)
        return result
# Lazily-created, process-wide EngineFacade (engine + sessionmaker pair).
_engine_facade = None
def _get_engine_facade():
    global _engine_facade
    # Build the facade from keystone's configured connection on first use;
    # later calls reuse the cached instance.
    if not _engine_facade:
        _engine_facade = db_session.EngineFacade.from_config(
            CONF.database.connection, CONF)
    return _engine_facade
def cleanup():
    # Drop the cached facade; the next _get_engine_facade() call rebuilds
    # it from the (possibly changed) configuration.
    global _engine_facade
    _engine_facade = None
def get_engine():
    # Return the shared SQLAlchemy engine from the cached facade.
    return _get_engine_facade().get_engine()
def get_session(expire_on_commit=False):
    # Return a new session; expire_on_commit is passed through to the
    # facade's sessionmaker.
    return _get_engine_facade().get_session(expire_on_commit=expire_on_commit)
@contextlib.contextmanager
def transaction(expire_on_commit=False):
    """Return a SQLAlchemy session in a scoped transaction.

    Commits on normal exit of the with-block, rolls back on exception
    (session.begin() semantics).
    """
    session = get_session(expire_on_commit=expire_on_commit)
    with session.begin():
        yield session
def truncated(f):
    """Ensure list truncation is detected in Driver list entity methods.

    This is designed to wrap and sql Driver list_{entity} methods in order to
    calculate if the resultant list has been truncated. Provided a limit dict
    is found in the hints list, we increment the limit by one so as to ask the
    wrapped function for one more entity than the limit, and then once the list
    has been generated, we check to see if the original limit has been
    exceeded, in which case we truncate back to that limit and set the
    'truncated' boolean to 'true' in the hints limit dict.
    """
    @functools.wraps(f)
    def wrapper(self, hints, *args, **kwargs):
        # The wrapped method must take hints as its first argument after
        # self; duck-type check rather than isinstance.
        if not hasattr(hints, 'get_limit'):
            raise exception.UnexpectedError(
                _('Cannot truncate a driver call without hints list as '
                  'first parameter after self '))
        limit_dict = hints.get_limit()
        if limit_dict is None:
            return f(self, hints, *args, **kwargs)
        # A limit is set, so ask for one more entry than we need
        list_limit = limit_dict['limit']
        hints.set_limit(list_limit + 1)
        ref_list = f(self, hints, *args, **kwargs)
        # If we got more than the original limit then trim back the list and
        # mark it truncated. In both cases, make sure we set the limit back
        # to its original value.
        if len(ref_list) > list_limit:
            hints.set_limit(list_limit, truncated=True)
            return ref_list[:list_limit]
        else:
            hints.set_limit(list_limit)
            return ref_list
    return wrapper
def _filter(model, query, hints):
    """Applies filtering to a query.

    :param model: the table model in question
    :param query: query to apply filters to
    :param hints: contains the list of filters yet to be satisfied.
                  Any filters satisfied here will be removed so that
                  the caller will know if any filters remain.

    :returns query: query, updated with any filters satisfied

    """
    def inexact_filter(model, query, filter_, hints):
        """Applies an inexact filter to a query.

        :param model: the table model in question
        :param query: query to apply filters to
        :param filter_: the dict that describes this filter
        :param hints: contains the list of filters yet to be satisfied.
                      Any filters satisfied here will be removed so that
                      the caller will know if any filters remain.

        :returns query: query updated to add any inexact filters we could
                        satisfy
        """
        column_attr = getattr(model, filter_['name'])

        # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity
        # so once we find a way of changing that (maybe on a call-by-call
        # basis), we can add support for the case sensitive versions of
        # the filters below.  For now, these case sensitive versions will
        # be handled at the controller level.

        if filter_['case_sensitive']:
            # Leave the filter in hints; the controller applies it later.
            return query

        if filter_['comparator'] == 'contains':
            query_term = column_attr.ilike('%%%s%%' % filter_['value'])
        elif filter_['comparator'] == 'startswith':
            query_term = column_attr.ilike('%s%%' % filter_['value'])
        elif filter_['comparator'] == 'endswith':
            query_term = column_attr.ilike('%%%s' % filter_['value'])
        else:
            # It's a filter we don't understand, so let the caller
            # work out if they need to do something with it.
            return query

        # Filter satisfied at the SQL layer: remove it from hints.
        hints.remove(filter_)
        return query.filter(query_term)

    def exact_filter(model, filter_, cumulative_filter_dict, hints):
        """Applies an exact filter to a query.

        :param model: the table model in question
        :param filter_: the dict that describes this filter
        :param cumulative_filter_dict: a dict that describes the set of
                                       exact filters built up so far
        :param hints: contains the list of filters yet to be satisfied.
                      Any filters satisfied here will be removed so that
                      the caller will know if any filters remain.

        :returns: updated cumulative dict
        """
        key = filter_['name']
        # Boolean columns need string values like 'true' coerced to bool.
        if isinstance(getattr(model, key).property.columns[0].type,
                      sql.types.Boolean):
            cumulative_filter_dict[key] = (
                utils.attr_as_boolean(filter_['value']))
        else:
            cumulative_filter_dict[key] = filter_['value']
        hints.remove(filter_)
        return cumulative_filter_dict

    filter_dict = {}

    for filter_ in hints.filters():
        # TODO(henry-nash): Check if name is valid column, if not skip
        if filter_['comparator'] == 'equals':
            filter_dict = exact_filter(model, filter_, filter_dict, hints)
        else:
            query = inexact_filter(model, query, filter_, hints)

    # Apply any exact filters we built up
    if filter_dict:
        query = query.filter_by(**filter_dict)

    return query
def _limit(query, hints):
"""Applies a limit to a query.
:param query: query to apply filters to
:param hints: contains the list of filters and limit details.
:returns updated query
"""
# NOTE(henry-nash): If we were to implement pagination, then we
# we would expand this method to support pagination and limiting.
# If we satisfied all the filters, set an upper limit if supplied
list_limit = hints.get_limit()
if list_limit:
query = query.limit(list_limit['limit'])
return query
def filter_limit_query(model, query, hints):
    """Applies filtering and limit to a query.

    :param model: table model
    :param query: query to apply filters to
    :param hints: contains the list of filters and limit details.  This may
                  be None, indicating that there are no filters or limits
                  to be applied. If it's not None, then any filters
                  satisfied here will be removed so that the caller will
                  know if any filters remain.

    :returns: updated query

    """
    # No hints at all: nothing to do.
    if hints is None:
        return query

    filtered = _filter(model, query, hints)

    if hints.filters():
        # Some filters could not be satisfied at the SQL layer.  The
        # controller will have to filter the results itself, so applying a
        # limit here could cut off rows the controller would have kept.
        # Leave limiting to the controller as well.
        return filtered

    # All filters satisfied; safe to apply any requested limit.
    return _limit(filtered, hints)
def handle_conflicts(conflict_type='object'):
    """Converts select sqlalchemy exceptions into HTTP 409 Conflict.

    :param conflict_type: name of the entity reported in the Conflict
                          error (e.g. 'user', 'project').
    """
    def decorator(method):
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            try:
                return method(*args, **kwargs)
            except db_exception.DBDuplicateEntry as e:
                # Duplicate key: always a conflict.
                raise exception.Conflict(type=conflict_type,
                                         details=six.text_type(e))
            except db_exception.DBError as e:
                # TODO(blk-u): inspecting inner_exception breaks encapsulation;
                # oslo.db should provide exception we need.
                if isinstance(e.inner_exception, IntegrityError):
                    raise exception.Conflict(type=conflict_type,
                                             details=six.text_type(e))
                # Not an integrity problem: propagate unchanged.
                raise
        return wrapper
    return decorator
longde123/MultiversePlatform | server/config/friendworld/extensions_proxy.py | 1 | 50308 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from java.util.concurrent import *
from java.util import *
from java.lang import *
from java.net import *
from java.sql import *
from multiverse.mars import *
from multiverse.mars.core import *
from multiverse.mars.objects import *
from multiverse.mars.util import *
from multiverse.mars.plugins import *
from multiverse.server.math import *
from multiverse.server.plugins import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
import time
import sys
# Load the MySQL JDBC driver class so DriverManager.getConnection() below
# can open connections to the web database.
driverName = "com.mysql.jdbc.Driver"
Class.forName(driverName)
# photo storage
places_url = "http://places.multiverse.net/"
# host running web database
webdb_host = "webdb.mv-places.com"
# for testing
#webdb_host = "localhost"
# Defaults; both may be overridden by engine properties below.
ProxyPlugin.MaxConcurrentUsers = 400
ROOM_PLAYER_LIMIT = 50
maxUsersProp = Engine.getProperty("places.max_concurrent_users")
if maxUsersProp != None:
    ProxyPlugin.MaxConcurrentUsers = int(maxUsersProp)
roomLimitProp = Engine.getProperty("places.room_player_limit")
if roomLimitProp != None:
    ROOM_PLAYER_LIMIT = int(roomLimitProp)
AGENT_NAME = Engine.getAgent().getName()
TOKEN_LIFE = 30000 # 30 seconds after which the token expires
def getDomainHost():
    """Return the message-server hostname, resolving 'localhost' to the
    machine's real name via InetAddress when possible."""
    hostName = Engine.getMessageServerHostname()
    if hostName == 'localhost':
        # Bug fix: localMachine was previously only assigned inside the try,
        # so if getLocalHost() itself threw, the except handler referenced an
        # unbound name and raised NameError instead of logging.
        localMachine = None
        try:
            localMachine = InetAddress.getLocalHost()
            hostName = localMachine.getHostName()
        except UnknownHostException:
            Log.error("getDomainHost: couldn't get host name from local IP address %s" % str(localMachine))
    Log.debug("getDomainHost: hostname = %s" % hostName)
    return hostName
# Resolved once at module load.
domainHostName = getDomainHost()
class SetMeshCommand (ProxyPlugin.CommandParser):
    """Handles "/setmesh <basemesh> [<submesh> <material>]...": replaces the
    player's display context with the given mesh specification."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        command = cmdEvent.getCommand()
        # Everything after the command verb is the mesh specification.
        meshSpec = command[command.index(' ') + 1:]
        tokens = meshSpec.split()
        baseMesh = tokens[0]
        # Remaining tokens come in (submesh, material) pairs.
        subMeshes = LinkedList()
        for idx in range(1, len(tokens) - 1, 2):
            subMeshes.add(DisplayContext.Submesh(tokens[idx], tokens[idx + 1]))
        Log.debug("/setmesh: oid=" + str(playerOid) + " to: " + meshSpec)
        WorldManagerClient.modifyDisplayContext(playerOid, WorldManagerClient.ModifyDisplayContextAction.REPLACE, baseMesh, subMeshes)
class PlayAnimationCommand (ProxyPlugin.CommandParser):
    """Handles "/playanimation <name>": plays a one-shot animation on the
    issuing player."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        command = cmdEvent.getCommand()
        # Animation name is everything after the first space.
        animName = command[command.index(' ') + 1:]
        Log.debug("/playanimation: oid=" + str(playerOid) + " with: " + animName)
        AnimationClient.playSingleAnimation(playerOid, animName)
class DanceCommand (ProxyPlugin.CommandParser):
    """Handles "/dance [on|off|0-6]".

    With no argument, toggles dancing: picks a random dance (1-6) if the
    player is not dancing, otherwise sets the dance state back to 0.
    With an argument: "on" picks a random dance, "off"/"0" stops dancing,
    and a digit 1-6 selects that dance explicitly.
    """
    def parse(self, cmdEvent):
        cmd = cmdEvent.getCommand()
        args = cmd.split()
        playerOid = cmdEvent.getObjectOid()
        Log.debug("/dance: oid=" + str(playerOid))
        if len(args) == 1:
            currentDanceState = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate")
            newDanceState = 0
            if currentDanceState == 0:
                rand = Random()
                newDanceState = int(rand.nextInt(6)) + 1
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", newDanceState)
        elif len(args) == 2:
            if args[1] == "on":
                # Bug fix: rand was previously unbound in this branch, so
                # "/dance on" raised NameError instead of starting a dance.
                rand = Random()
                newDanceState = int(rand.nextInt(6)) + 1
                EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", newDanceState)
            elif args[1] == "off" or args[1] == "0":
                EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", 0)
            else:
                # Narrowed from a bare except: only a non-numeric argument
                # is expected to fail here.
                try:
                    newDanceState = int(args[1])
                    if newDanceState >= 1 and newDanceState <= 6:
                        EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", newDanceState)
                except ValueError:
                    pass
class GestureCommand (ProxyPlugin.CommandParser):
    """Handles "/gesture [on|off]": toggles or sets the player's
    gesture state property."""
    def parse(self, cmdEvent):
        args = cmdEvent.getCommand().split()
        playerOid = cmdEvent.getObjectOid()
        Log.debug("/gesture: oid=" + str(playerOid))
        if len(args) == 1:
            # No argument: flip the current state.
            current = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate")
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate", Boolean(not current))
        elif len(args) == 2:
            if args[1] == "on":
                EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate", Boolean(True))
            if args[1] == "off":
                EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate", Boolean(False))
# Maps "/sit <style>" arguments to sit animation names.
# 'low'/'med'/'high' select seat heights; '1'-'3' select attitude idles.
sitList = {
    'low'  : 'ntrl_sit_50cm',
    'med'  : 'ntrl_sit_75cm',
    'high' : 'ntrl_sit_85cm',
    '1'    : 'ntrl_sit_50cm_attd_01_idle_01',
    '2'    : 'ntrl_sit_50cm_attd_02_idle_01',
    '3'    : 'ntrl_sit_50cm_attd_03_idle_01',
}
class SitCommand (ProxyPlugin.CommandParser):
    """Handles "/sit [on|off|<style>]".

    With no argument, toggles the player's "sitstate" property, playing the
    sit animation on the way down.  "on"/"off" force the state; any other
    argument is looked up in sitList for a style-specific animation.
    """
    def parse(self, cmdEvent):
        cmd = cmdEvent.getCommand()
        args = cmd.split()
        playerOid = cmdEvent.getObjectOid()
        Log.debug("/sit: oid=" + str(playerOid))
        if len(args) == 1:
            Log.debug("/sit: oid=" + str(playerOid))
            # Only play the sit animation when transitioning stand -> sit.
            if (not WorldManagerClient.getObjectProperty(playerOid, "sitstate")):
                AnimationClient.playSingleAnimation(playerOid, "sit") # stand to sit
            else:
                # AnimationClient.playSingleAnimation(playerOid, "stand") # sit to stand
                pass
            # Flip the persisted state.
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(not EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate")))
        elif len(args) == 2:
            sitStyle = args[1]
            Log.debug("/sit: oid=" + str(playerOid) + ", sit style=" + sitStyle)
            if sitStyle == "on":
                AnimationClient.playSingleAnimation(playerOid, "sit") # stand to sit
                EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(True))
                return
            elif sitStyle == "off":
                # AnimationClient.playSingleAnimation(playerOid, "stand") # sit to stand
                EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(False))
                return
            # Unknown styles fall back to the plain "sit" animation.
            animName = 'sit'
            if sitStyle in sitList.keys():
                animName = sitList[sitStyle]
            if (not WorldManagerClient.getObjectProperty(playerOid, "sitstate")):
                AnimationClient.playSingleAnimation(playerOid, animName) # stand to sit
            else:
                # AnimationClient.playSingleAnimation(playerOid, "stand") # sit to stand
                pass
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(not EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate")))
class GMCommand (ProxyPlugin.CommandParser):
    """Handles "/gmmode": toggles the player's GM mode flag.  Admins only;
    non-admins are silently ignored."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
        # Guard clause: only admins may toggle GM mode.
        if not isAdmin(accountId):
            return
        Log.debug("/gmmode: oid=" + str(playerOid))
        gmMode = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "GMMode")
        # Property is absent until first use; treat missing as "off".
        if gmMode is None:
            gmMode = False
        EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "GMMode", Boolean(not gmMode))
class PropertyCommand(ProxyPlugin.CommandParser):
    """Handles "/property <name> [<value>]": with a value, sets the named
    world-manager property; without, echoes its current value to chat."""
    def parse(self, cmdEvent):
        args = cmdEvent.getCommand().split()
        playerOid = cmdEvent.getObjectOid()
        if len(args) == 3:
            propName, propValue = args[1], args[2]
            Log.debug("/property: oid=" + str(playerOid) + " " + propName + " " + propValue)
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, propName, propValue)
        if len(args) == 2:
            propName = args[1]
            Log.debug("/property: oid=" + str(playerOid) + " " + propName)
            propValue = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, propName)
            WorldManagerClient.sendObjChatMsg(playerOid, 0, str(propValue))
class IgnoreCommand(ProxyPlugin.CommandParser):
    """Handles "/ignore <playername>": adds all matching characters to the
    player's ignore list (excluding the player themselves)."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        player = proxyPlugin.getPlayer(playerOid)
        cmd = cmdEvent.getCommand()
        args = cmd.split()
        Log.debug("/ignore: oid=%s; cmd=%s; args=%s" % (str(playerOid), cmd, args))
        # Rest for 2+ but only ignore the first.
        # Additional args may be first name, last name, etc.,
        # for greater ignore granularity in the future.
        if len(args) >= 2:
            result = proxyPlugin.matchingPlayers(player, args[1], True)
            if result is not None:
                # result[0] is the list of matching oids.
                oids = result[0]
                if oids is not None and oids.size() > 0:
                    if playerOid in oids: # can't ignore self
                        # This is ugly, but remove(playerOid) doesn't
                        # work (playerOid is treated as an index), and
                        # indexOf(playerOid) returns -1.
                        for i in range(len(oids)):
                            if playerOid == oids[i]:
                                oids.remove(i)
                                break;
                    # Make sure removing playerOid didn't empty the list.
                    if oids.size() > 0:
                        proxyPlugin.updateIgnoredOids(player, oids, None)
                        WorldManagerClient.sendObjChatMsg(playerOid, 0, "You are now ignoring all characters named %s." % args[1])
                    else:
                        WorldManagerClient.sendObjChatMsg(playerOid, 0, "No matches found for %s." % args[1])
                else:
                    WorldManagerClient.sendObjChatMsg(playerOid, 0, "No matches found for %s." % args[1])
        else:
            WorldManagerClient.sendObjChatMsg(playerOid, 0, "Usage: /ignore playername")
#
# places specific /sys command
# determine admin status of caller, than calls into common/proxy.py
#
class FRW_SysCommand (ProxyPlugin.CommandParser):
    """Places-specific "/sys" command: only admins may run the shared
    system-command handler (defined in common/proxy.py)."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
        if not isAdmin(accountId):
            return
        handleSysCommand(cmdEvent)
# Register the slash-command handlers defined above with the proxy.
proxyPlugin.registerCommand("/setmesh", SetMeshCommand())
proxyPlugin.registerCommand("/playanimation", PlayAnimationCommand())
proxyPlugin.registerCommand("/dance", DanceCommand())
proxyPlugin.registerCommand("/gesture", GestureCommand())
proxyPlugin.registerCommand("/sit", SitCommand())
proxyPlugin.registerCommand("/gmmode", GMCommand())
proxyPlugin.registerCommand("/property", PropertyCommand())
proxyPlugin.registerCommand("/ignore", IgnoreCommand())
proxyPlugin.registerCommand("/sys", FRW_SysCommand())
class YesCommand (ProxyPlugin.CommandParser):
    """Handles "/yes": plays the nod emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/yes: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_nod")
class NoCommand (ProxyPlugin.CommandParser):
    """Handles "/no": plays the head-shake emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/no: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_headshake")
class ShrugCommand (ProxyPlugin.CommandParser):
    """Handles "/shrug": plays the shrug emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/shrug: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_shrug")
class LaughCommand (ProxyPlugin.CommandParser):
    """Handles "/laugh": plays the laugh emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/laugh: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_laugh")
class WaveCommand (ProxyPlugin.CommandParser):
    """Handles "/wave": plays the wave emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/wave: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_wave")
class BowCommand (ProxyPlugin.CommandParser):
    """Handles "/bow": plays the bow emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/bow: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_bow")
class PointCommand (ProxyPlugin.CommandParser):
    """Handles "/point": plays the point emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/point: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_point")
class ClapCommand (ProxyPlugin.CommandParser):
    """Handles "/clap": plays the clap emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/clap: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_clap")
class CheerCommand (ProxyPlugin.CommandParser):
    """Handles "/cheer": plays the cheer emote animation."""
    def parse(self, cmdEvent):
        oid = cmdEvent.getObjectOid()
        Log.debug("/cheer: oid=" + str(oid))
        AnimationClient.playSingleAnimation(oid, "ntrl_cheer")
class AttitudeCommand (ProxyPlugin.CommandParser):
    """Handles "/attitude [n]" (alias "/attd"): plays idle-attitude
    animation n; anything non-numeric or above 3 falls back to 1."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        cmd = cmdEvent.getCommand()
        args = cmd.split()
        animNum = 1
        if len(args) > 1:
            try:
                animNum = int(args[1])
            except:
                animNum = 1
        if animNum > 3:
            # Out-of-range selection falls back to the first animation.
            animNum = 1
        Log.debug("/attitude: oid= %s; cmd=%s" % (str(playerOid), cmd))
        AnimationClient.playSingleAnimation(playerOid, "ntrl_attd_%02d_idle_01" % animNum)
class SetTVUrlCommand(ProxyPlugin.CommandParser):
    """Handles "/settvurl <url>": points the targeted TV object at a stream
    URL (http:// or mms:// only)."""
    def parse(self, cmdEvent):
        playerOid = cmdEvent.getObjectOid()
        tvOid = cmdEvent.getTarget()
        url = cmdEvent.getCommand().split(" ")[1]
        if url is not None and (url.startswith("http://") or url.startswith("mms://")):
            WorldManagerClient.setObjectProperty(tvOid, "tv_url", url)
            WorldManagerClient.sendObjChatMsg(playerOid, 0, "TV set to: " + url)
        else:
            WorldManagerClient.sendObjChatMsg(playerOid, 0, "Please include http:// or mms:// in the address")
# Register the emote and utility slash-commands defined above.
proxyPlugin.registerCommand("/yes", YesCommand())
proxyPlugin.registerCommand("/no", NoCommand())
proxyPlugin.registerCommand("/shrug", ShrugCommand())
proxyPlugin.registerCommand("/laugh", LaughCommand())
proxyPlugin.registerCommand("/wave", WaveCommand())
proxyPlugin.registerCommand("/bow", BowCommand())
proxyPlugin.registerCommand("/point", PointCommand())
proxyPlugin.registerCommand("/clap", ClapCommand())
proxyPlugin.registerCommand("/cheer", CheerCommand())
proxyPlugin.registerCommand("/attitude", AttitudeCommand())
# "/attd" is an alias for "/attitude".
proxyPlugin.registerCommand("/attd", AttitudeCommand())
proxyPlugin.registerCommand("/settvurl", SetTVUrlCommand())
def instanceSetObjectProperty(instanceOid, oid, namespace, key, value):
    """Record key=value for object `oid` in the instance's RoomItemsProps map.

    NOTE(review): the `namespace` argument is currently unused; the value is
    only persisted in the instance's RoomItemsProps property -- confirm intent.
    """
    roomProps = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
    # Entries are keyed by the object's world-manager name.
    objName = WorldManagerClient.getObjectInfo(oid).name
    if roomProps.containsKey(objName):
        perObject = roomProps[objName]
    else:
        perObject = HashMap()
    perObject[key] = value
    roomProps[objName] = perObject
    EnginePlugin.setObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps", roomProps)
######################
# Dynamic Instancing #
######################
class DynInstProxyExtHook (ProxyExtensionHook):
    """Proxy extension hook that forwards dynamic-instancing requests."""
    def processExtensionEvent(self, event, player, proxy):
        # All the real work lives in DynamicInstancing.
        DynamicInstancing().handleRequest(event.getPropertyMap(), player, proxy)
def setProfilePhotos(instanceOid):
    """Populate the room's profile-photo objects with the owner's photo and
    the photos of the owner's friends, persisting them in RoomItemsProps.

    Objects are located by name: "<roomStyle>_profile_main" for the owner
    and "<roomStyle>_profile_NN" for each friend slot.
    """
    roomItemsProps = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
    roomOwnerId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
    roomStyle = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomStyle")
    # get photo for room owner
    photoURL = getDBProperty(roomOwnerId, "PhotoURL")
    # get oid for profile_main
    profileMain = roomStyle + "_profile_main"
    profileMainOid = ObjectManagerClient.getNamedObject(instanceOid, profileMain, None)
    Log.debug("[CYC] '%s' oid is %s" % (profileMain, profileMainOid))
    if profileMainOid is None:
        return
    # set pic_url for profile
    roomItemsProps = setObjectProperty(profileMainOid, Namespace.WORLD_MANAGER, "pic_url", photoURL, roomItemsProps)
    # get friendlist
    friendlist = getFriendlist(roomOwnerId)
    i = 0
    for friendId in friendlist:
        # get photo
        photoURL = getDBProperty(friendId, "PhotoURL")
        # set pic_url for friendlist
        i = i + 1
        profileName = roomStyle + "_profile_%02d" % i
        profileOid = ObjectManagerClient.getNamedObject(instanceOid, profileName, None)
        Log.debug("[CYC] '%s' oid is %s" % (profileName, profileOid))
        # NOTE(review): returning here skips persisting any photos already
        # assigned above when a friend slot object is missing -- confirm intent.
        if profileOid is None:
            return
        roomItemsProps = setObjectProperty(profileOid, Namespace.WORLD_MANAGER, "pic_url", photoURL, roomItemsProps)
        roomItemsProps = setObjectProperty(profileOid, Namespace.WORLD_MANAGER, "AccountId", friendId, roomItemsProps)
    EnginePlugin.setObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps", roomItemsProps)
#
# Separate class allows instancing to be called outside the hook
# (i.e. kicking a player to the default instance).
#
class DynamicInstancing:
    """Implements the "proxy.DYNAMIC_INSTANCE" requests: loading a (room)
    instance, moving a player into it, and spawning collectible furniture.

    Kept as a separate class so instancing can be invoked outside the hook
    (e.g. kicking a player back to the default instance).
    """
    def handleRequest(self, props, player, proxy):
        """Dispatch on props["command"]: "collectible", "instance" or "load".

        For "instance"/"load", an "owner" property (account id or player
        name) redirects the target to that owner's "room-<accountId>"
        instance; otherwise props["instanceName"] is used as-is.
        """
        cmd = None
        if props.containsKey("command"):
            cmd = props["command"]
        if cmd == "collectible":
            self.addCollectible(props, player, proxy)
        if (cmd == "instance") or (cmd == "load"):
            Log.debug("processExtensionEvent (dyninst): cmd =" + cmd)
            markerName = ""
            if props.containsKey("markerName"):
                markerName = props["markerName"]
            else:
                markerName = "spawnPt"
            instanceName = ""
            if props.containsKey("instanceName"):
                instanceName = props["instanceName"]
            owner = None
            if props.containsKey("owner"):
                owner = props["owner"]
                db = Engine.getDatabase()
                # "owner" may be a numeric account id or a player name.
                try:
                    accountId = int(owner)
                except:
                    ownerOid = db.getOidByName(owner, Namespace.WORLD_MANAGER)
                    accountId = EnginePlugin.getObjectProperty(ownerOid, Namespace.WORLD_MANAGER, "AccountId")
                instanceName = "room-" + str(accountId)
            instanceOid = self.loadInstance(props, player, proxy, instanceName)
            if instanceOid == None:
                WorldManagerClient.sendObjChatMsg(player.getOid(), 0, "Player does not have a room.")
                return
            if (cmd == "instance"):
                success = self.enterInstance(props, player, proxy, instanceName, markerName)
                if success:
                    # Record whose room the player is now in (None for the
                    # default instance); used by e.g. the kick hook.
                    playerOid = player.getOid()
                    roomOwnerId = None # default instance
                    if owner is not None: # room instance
                        roomOwnerId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
                    EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "roomOwnerId", roomOwnerId)
    def loadInstance(self, props, player, proxy, instanceName):
        """Load the named instance, retrying while the instance subsystem
        reports RESULT_ERROR_RETRY.  Returns the instance oid or None."""
        instanceOid = InstanceClient.getInstanceOid(instanceName)
        if instanceOid is None:
            Log.error("Error loading instance "+instanceName)
            return None
        while True:
            result = InstanceClient.loadInstance(instanceOid)
            if result != InstanceClient.RESULT_ERROR_RETRY:
                break
            time.sleep(1)
        if result != InstanceClient.RESULT_OK:
            Log.error("Error loading instance "+str(instanceOid)+", result "+str(result))
        # Rooms get their profile photos refreshed on every load.
        if instanceName.find("room-") == 0:
            setProfilePhotos(instanceOid)
        return instanceOid
    def enterInstance(self, props, player, proxy, instanceName, markerName):
        """Move the player to the marker point of a selected shard of the
        named instance, with a small random XZ offset.  Returns success."""
        instanceOid = proxyPlugin.getInstanceEntryCallback().selectInstance(player,instanceName)
        if instanceOid == None:
            return False
        if instanceName.find("room-") == 0:
            setProfilePhotos(instanceOid)
        if (instanceOid is not None):
            loc = InstanceClient.getMarkerPoint(instanceOid, markerName)
            wnode = BasicWorldNode()
            wnode.setInstanceOid(instanceOid)
            # Scatter arrivals +/-2m around the marker so players don't stack.
            rand = Random()
            newloc = Point((loc.getX() + (int(rand.nextFloat() * 4000.0) - 2000)),
                           (loc.getY()),
                           (loc.getZ() + (int(rand.nextFloat() * 4000.0) - 2000)))
            wnode.setLoc(newloc)
            wnode.setDir(MVVector(0,0,0))
            return InstanceClient.objectInstanceEntry(player.getOid(),wnode,0)
        return False
    def addCollectible(self, props, player, proxy):
        """Spawn a persistent "furniture" object at props["loc"]/props["dir"]
        in the player's current instance.  Returns the new object's oid."""
        Log.debug("makeCollectible (dyninst): loveseat")
        playerOid = player.getOid()
        loc = Point(props["loc"])
        dir = props["dir"]
        meshname = props["mesh_name"]
        itemname = props["item_name"]
        pWNode = WorldManagerClient.getWorldNode(playerOid)
        instanceOid = pWNode.getInstanceOid()
        iInfo = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_NAME)
        dc = DisplayContext(meshname, True)
        ot = Template("furniture") # template name
        ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_DISPLAY_CONTEXT, dc)
        ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_NAME, itemname)
        ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_INSTANCE, Long(instanceOid)) # -- instance OID
        ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_LOC, loc) # player location + 2m in the Z-axis
        ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_ORIENT, dir) # player orientation
        # ot.put(Namespace.WORLD_MANAGER, "Targetable", Boolean(True))
        # ot.put(Namespace.WORLD_MANAGER, "ClickHookName", "furniture_menu")
        ot.put(Namespace.OBJECT_MANAGER, ObjectManagerClient.TEMPL_PERSISTENT, Boolean(True))
        objectOid = ObjectManagerClient.generateObject("furniture", ot) # template name
        rv = WorldManagerClient.spawn(objectOid)
        Log.debug("dynamic instance: generated obj oid = " + str(objectOid))
        return objectOid
# Route "proxy.DYNAMIC_INSTANCE" extension messages to the instancing hook.
proxyPlugin.addProxyExtensionHook("proxy.DYNAMIC_INSTANCE", DynInstProxyExtHook())
class PlacesInstanceEntryCallback (InstanceEntryCallback):
    """Controls which players may enter which instances and selects an
    instance shard by population.

    Room instances are named "room-<ownerAccountId>"; entry to a room is
    governed by the owner's 'Privacy' profile setting and a population cap.
    """
    def instanceEntryAllowed(self, playerOid, instanceOid, location):
        """Return True if playerOid may enter instanceOid."""
        Log.info("PlacesInstanceEntryCallback: playerOid="+str(playerOid)+" "+
                 "instanceOid="+str(instanceOid)+" loc="+str(location))
        info = None
        # Get the instance name.  In the case of a room, we can extract
        # the owner's account id.
        instanceName = Engine.getDatabase().getObjectName(instanceOid, InstanceClient.NAMESPACE)
        if instanceName == None:
            info = InstanceClient.getInstanceInfo(instanceOid,
                InstanceClient.FLAG_PLAYER_POPULATION | InstanceClient.FLAG_NAME)
            if info == None or info.name == None:
                Log.debug("PlacesInstanceEntryCallback: Could not get instance information for instanceOid="+str(instanceOid))
                return False
            instanceName = info.name
        # Non-room instances are unrestricted.
        if instanceName.find("room-") != 0:
            return True
        # Bug fix: convert the owner account id to an int once, here.  It
        # was previously kept as a string, which made the "%d" formatting
        # in playerAllowedEntry() raise a TypeError.
        ownerAccountId = int(instanceName[5:])
        # Get the player's account id
        playerAccountId = EnginePlugin.getObjectProperty(playerOid, Namespace.OBJECT_MANAGER, "AccountId")
        # HACK for backward compatibility: if no AccountId, then allow
        if playerAccountId == None:
            return True
        # Player can always enter their own room
        if playerAccountId == ownerAccountId:
            return True
        if not self.playerAllowedEntry(ownerAccountId, playerAccountId):
            Log.debug("PlacesInstanceEntryCallback: playerAllowed returned false for accountId " + str(playerAccountId))
            WorldManagerClient.sendObjChatMsg(playerOid, 0, "Privacy settings for room '" + instanceName + "' don't allow you to enter")
            return False
        # Get instance population and check limit
        if info == None:
            info = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_PLAYER_POPULATION)
        limit = EnginePlugin.getObjectProperty(instanceOid, InstanceClient.NAMESPACE, "populationLimit")
        if limit == None:
            limit = ROOM_PLAYER_LIMIT
        if info.playerPopulation >= limit:
            WorldManagerClient.sendObjChatMsg(playerOid, 0, "Room is full, try again later.")
            Log.info("ProxyPlugin: INSTANCE_FULL playerOid=" + str(playerOid) +
                     " instanceOid=" + str(instanceOid) +
                     " ownerAccountId=" + str(ownerAccountId) +
                     " limit=" + str(limit))
            return False
        return True
    def playerAllowedEntry(self, ownerAccountId, friendAccountId):
        """Check the room owner's 'Privacy' profile setting ('Anyone' or
        'Friends') against the visitor.  Defaults to 'Anyone' when the
        owner has no Privacy row or the database query fails."""
        privacy_setting = "Anyone"
        is_friend = 0
        logPrefix = "playerAllowedEntry: For ownerAccountId " + str(ownerAccountId) + " and friendAccountId " + str(friendAccountId)
        sql = "SELECT p.value, IF (EXISTS (SELECT 1 FROM friends AS f WHERE f.my_id = %d AND f.friend_id = %d) ,1,0) AS is_friend FROM profile AS p WHERE p.account_id = %d AND p.property = 'Privacy'" % (ownerAccountId, friendAccountId, ownerAccountId)
        try:
            url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
            # Get a row with two columns: the value of the 'Privacy' property
            # for the profile table, and whether the visitor is a friend.
            con = DriverManager.getConnection(url)
            stm = con.createStatement()
            srs = stm.executeQuery(sql)
            if (srs.next()):
                privacy_setting = srs.getString("value")
                is_friend = srs.getInt("is_friend")
            else:
                # No rows returned means use the default value of "Anyone".
                privacy_setting = "Anyone"
            srs.close()
            stm.close()
            con.close()
        except:
            Log.debug("playerAllowedEntry: Got exception running database query to retrieve privacy permission for account " +
                      str(ownerAccountId) + ", sql is " + sql + ", exception " + str(sys.exc_info()[0]))
        if privacy_setting == "Anyone":
            Log.debug(logPrefix + ", allowing entry because the privacy setting is 'Anyone'")
            return True
        if (privacy_setting == "Friends"):
            if is_friend == 1:
                Log.debug(logPrefix + ", allowing entry because the privacy setting is 'Friends' and he is a friend")
                return True
            else:
                Log.debug(logPrefix + ", not allowing entry because the privacy setting is 'Friends' and he is not a friend")
                return False
        else:
            Log.debug(logPrefix + ", not allowing entry because the privacy setting is '" + privacy_setting + "'")
            return False
    def selectInstance(self,player,instanceName):
        """Pick a shard of the named instance for the player: the most
        populated shard that is still under its population limit.  Returns
        the instance oid, or None when unknown or all shards are full."""
        infos = InstanceClient.getInstanceInfoByName(instanceName,
            InstanceClient.FLAG_PLAYER_POPULATION)
        if infos.size() == 0:
            Log.error("PlacesInstanceEntryCallback: unknown instance name " +
                instanceName)
            return None
        if infos.size() == 1:
            return infos.get(0).oid
        selected = None
        for info in infos:
            # Prefer the fullest shard that still has room, to pack players.
            if selected == None or info.playerPopulation > selected.playerPopulation:
                limit = EnginePlugin.getObjectProperty(info.oid,
                    InstanceClient.NAMESPACE, "populationLimit")
                if limit == None:
                    limit = ROOM_PLAYER_LIMIT
                if info.playerPopulation < limit:
                    selected = info
        if selected != None:
            return selected.oid
        else:
            Log.error("PlacesInstanceEntryCallback: all instances full name=" +
                instanceName)
            return None
# Install the Places-specific entry/selection policy on the proxy.
proxyPlugin.setInstanceEntryCallback(PlacesInstanceEntryCallback())
#####
#
#####
def setObjectProperty(oid, namespace, key, value, props):
    """Set key=value on the object and mirror it into the per-room props map.

    Returns the updated props map so callers can persist it on the instance.
    """
    # Entries in the room map are keyed by the object's world-manager name.
    objName = WorldManagerClient.getObjectInfo(oid).name
    if props.containsKey(objName):
        perObject = props[objName]
    else:
        perObject = HashMap()
    perObject[key] = value
    props[objName] = perObject
    EnginePlugin.setObjectProperty(oid, namespace, key, value)
    return props
class SetPropertyProxyExtHook (ProxyExtensionHook):
    """Handles "mv.SET_PROPERTY" messages: sets media/appearance properties
    on room objects and persists them in the instance's RoomItemsProps.

    TV and radio URLs may be set by anyone in the room; picture, CD and
    subsurface-color changes require the room owner.
    """
    def processExtensionEvent(self, event, player, proxy):
        playerOid = player.getOid()
        pWNode = WorldManagerClient.getWorldNode(playerOid)
        instanceOid = pWNode.getInstanceOid()
        # security check -- check if player is instance owner
        isOwner = False
        accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
        # Instance names look like "room-<accountId>"; the owner id is the
        # part after the dash.
        instanceName = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_NAME).name
        instanceOwnerStr = instanceName[instanceName.index('-')+1:]
        instanceOwner = Integer.parseInt(instanceOwnerStr)
        if instanceOwner == accountId:
            isOwner = True
        props = event.getPropertyMap()
        roomItemsProps = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
        if 'tv_url' in props.keySet():
            oid = props['oid']
            url = props['tv_url']
            roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "tv_url", url, roomItemsProps)
        if 'radio_url' in props.keySet():
            oid = props['oid']
            url = props['radio_url']
            roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "radio_url", url, roomItemsProps)
        if 'pic_url' in props.keySet() and isOwner:
            oid = props['oid']
            url = props['pic_url']
            roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "pic_url", url, roomItemsProps)
        if 'cd_url' in props.keySet() and isOwner:
            oid = props['oid']
            url = props['cd_url']
            name = props['tooltip']
            roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "cd_url", url, roomItemsProps)
            roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "tooltip", name, roomItemsProps)
        if 'subsurface' in props.keySet() and isOwner:
            objOid = props['oid']
            subsurfaceName = props['subsurface']
            subsurface = props['value']
            roomItemsProps = setObjectProperty(objOid, Namespace.WORLD_MANAGER, subsurfaceName, subsurface, roomItemsProps)
            roomItemsProps = setObjectProperty(objOid, Namespace.WORLD_MANAGER, 'AppearanceOverride', 'coloredfurniture', roomItemsProps)
        ######
        # if 'hide' in props.keySet():
        #     for pair in props['hide']:
        #         roomItemsProps = self.setObjectProperty(pair[0], Namespace.WORLD_MANAGER, 'Hide', Boolean(pair[1]), roomItemsProps)
        # if 'style' in props.keySet():
        #     objOid = props['oid']
        #     style = props['style']
        #     roomItemsProps = self.setObjectProperty(objOid, Namespace.WORLD_MANAGER, 'RoomStyle', style, roomItemsProps)
        ######
        # Persist the accumulated per-object properties on the instance.
        EnginePlugin.setObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps", roomItemsProps)
# Route "mv.SET_PROPERTY" extension messages to the hook above.
proxyPlugin.addProxyExtensionHook("mv.SET_PROPERTY", SetPropertyProxyExtHook())
#
# convenience function used solely to determine whether the SELECT
# finds a match - note we append "LIMIT 1" to the passed query, to
# return only a single match
#
# returns True (there was a match), or False (there were no matches)
#
def doesQueryMatch(sql):
    """Return True iff the given SELECT matches at least one row.

    Appends "LIMIT 1" to the passed query, so only a single match is fetched.
    NOTE(review): callers interpolate values into `sql` themselves; current
    call sites pass numeric account ids, but prefer PreparedStatement for
    anything user-supplied.
    """
    result = False
    url = "jdbc:mysql://%s/friendworld?user=root&password=test" % webdb_host
    # Bug fix: the connection, statement and result set were previously
    # leaked if executeQuery() raised; close them in finally blocks.
    con = DriverManager.getConnection(url)
    try:
        stm = con.createStatement()
        try:
            res = stm.executeQuery("%s LIMIT 1" % sql)
            if res.next():
                result = True
            res.close()
        finally:
            stm.close()
    finally:
        con.close()
    return result
#
# convenience function used to perform an INSERT, UPDATE or DELETE
# on the web database
#
# returns number of rows affected by the update
#
def updateDatabase(sql):
    """Execute an INSERT, UPDATE or DELETE against the web database.

    Returns the number of rows affected.
    """
    url = "jdbc:mysql://%s/friendworld?user=root&password=test" % webdb_host
    # Bug fix: the connection and statement were previously leaked if
    # executeUpdate() raised; close them in finally blocks.
    con = DriverManager.getConnection(url)
    try:
        stm = con.createStatement()
        try:
            result = stm.executeUpdate(sql)
        finally:
            stm.close()
    finally:
        con.close()
    return result
class AddFriendProxyExtHook (ProxyExtensionHook):
    """Handles "mvp.ADD_FRIEND": sends a friend request, or confirms an
    existing reciprocal invitation into a friendship.

    Expects property-map keys 'friend_id' (target account id), 'friend_oid'
    (target object oid) and 'account_id' (requester's account id).
    """
    def processExtensionEvent(self, event, player, proxy):
        Log.debug("[CYC] add friend proxy hook")
        playerOid = player.getOid()
        pWNode = WorldManagerClient.getWorldNode(playerOid)
        instanceOid = pWNode.getInstanceOid()
        props = event.getPropertyMap()
        friendAccountId = None
        if props.containsKey('friend_id'):
            friendAccountId = props['friend_id']
        friendOid = None
        if props.containsKey('friend_oid'):
            friendOid = props['friend_oid']
        myAccountId = None
        if props.containsKey('account_id'):
            myAccountId = props['account_id']
        Log.debug("[CYC] %s, %s, %s" % (friendAccountId, friendOid, myAccountId))
        # All three properties are required.
        if friendAccountId is None or friendOid is None or myAccountId is None:
            return
        #
        # so we can provide the player with useful feedback
        #
        friendName = proxyPlugin.getPlayer(friendOid).name
        #
        # don't add a friend invite if...
        #
        # we're already friends
        if doesQueryMatch("SELECT friend_id FROM friends WHERE my_id = %d AND friend_id = %d" % (myAccountId, friendAccountId)):
            WorldManagerClient.sendObjChatMsg(playerOid, 2, "You're already friends with %s." % friendName)
            return
        # i've already invited this person to become friends
        haveInvited = doesQueryMatch("SELECT to_id, from_id FROM invitations WHERE to_id = %d AND from_id = %d" % (friendAccountId, myAccountId))
        if haveInvited:
            WorldManagerClient.sendObjChatMsg(playerOid, 2, "You've already sent %s a friend request." % friendName)
            return
        #
        # if this person has previously invited me to become friends,
        # treat 'add friend' as a confirmation - add as friend, and
        # remove any mutual invitations
        #
        if doesQueryMatch("SELECT to_id, from_id FROM invitations WHERE to_id = %d AND from_id = %d" % (myAccountId, friendAccountId)):
            # Friendship rows are inserted in both directions.
            result = updateDatabase("INSERT INTO friends (my_id, friend_id, timestamp) VALUES (%d, %d, NOW())" % (myAccountId, friendAccountId))
            result = updateDatabase("INSERT INTO friends (my_id, friend_id, timestamp) VALUES (%d, %d, NOW())" % (friendAccountId, myAccountId))
            result = updateDatabase("DELETE FROM invitations WHERE to_id = %d AND from_id = %d" % (myAccountId, friendAccountId))
            # NOTE(review): haveInvited is always False here (the method
            # returned above when it was True), so this delete is dead code.
            if haveInvited:
                result = updateDatabase("DELETE FROM invitations WHERE to_id = %d AND from_id = %d" % (friendAccountId, myAccountId))
            WorldManagerClient.sendObjChatMsg(playerOid, 2, "You are now friends with %s." % friendName)
            return
        Log.debug("[CYC] adding friend ... db call")
        # Add friend
        message = ""
        url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
        sql = "INSERT INTO invitations (to_id, from_id, message, timestamp) VALUES (%s, %s, '%s', NOW())" % (friendAccountId, myAccountId, message)
        con = DriverManager.getConnection(url)
        stm = con.createStatement()
        res = stm.executeUpdate(sql)
        Log.debug("[CYC] add friend insert result = %d" % res)
        stm.close()
        con.close()
        Log.debug("[CYC] sending friend request message")
        # Send friend message
        WorldManagerClient.sendObjChatMsg(playerOid, 2, "You have sent a friend request to %s." % friendName)
        WorldManagerClient.sendObjChatMsg(friendOid, 2, "You have a new friend request from %s." % player.name)
# Route "mvp.ADD_FRIEND" extension messages to the hook above.
proxyPlugin.addProxyExtensionHook("mvp.ADD_FRIEND", AddFriendProxyExtHook())
class KickPlayerProxyExtHook (ProxyExtensionHook):
    """Handles "mvp.KICK_FROM_ROOM": lets the room owner (or an admin) eject another player."""
    def processExtensionEvent(self, event, player, proxy):
        """Validate the kick request and, if permitted, teleport the target to the default instance.

        @param event extension event; its property map may carry 'oid' (the target player oid)
        @param player the player issuing the kick
        @param proxy the proxy connection of the issuing player
        """
        playerOid = player.getOid()
        # get player's accountId
        accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
        Log.debug("KickHook: kick request from playerOid=%d, accountId=%d" % (playerOid, accountId))
        # get room's ownerId
        roomOwnerId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "roomOwnerId")
        # kicking player must own the room or be an admin (admin status is
        # re-checked here server-side, never trusted from the client)
        adminState = str(getDBProperty(accountId, "Admin"))
        if accountId != roomOwnerId and adminState != "True":
            WorldManagerClient.sendObjChatMsg(playerOid, 2, "Sorry, you don't have permission to kick that player.")
            return
        # validate kick target
        props = event.getPropertyMap()
        kickOid = None
        if props.containsKey('oid'):
            kickOid = props['oid']
        if kickOid is None:
            return
        # don't let owner be kicked from their own room
        kickAccountId = EnginePlugin.getObjectProperty(kickOid, Namespace.WORLD_MANAGER, "AccountId")
        if kickAccountId == roomOwnerId:
            WorldManagerClient.sendObjChatMsg(playerOid, 2, "Sorry, can't kick a player from their own room.")
            return
        # valid target - send them back to the spawn point of the default instance
        props = HashMap()
        props.put("command", "instance")
        props.put("instanceName", "default")
        props.put("markerName", "spawnPt")
        kickedPlayer = proxyPlugin.getPlayer(kickOid)
        Log.debug("KickHook: kicking kickOid=%d (%s)" % (kickOid, kickedPlayer.name))
        DynamicInstancing().handleRequest(props, kickedPlayer, proxy)
        # notify both parties on chat channel 2
        WorldManagerClient.sendObjChatMsg(playerOid, 2, "%s has been kicked from the room." % kickedPlayer.name)
        WorldManagerClient.sendObjChatMsg(kickOid, 2, "You have been kicked from the room.")
# register the hooks with the proxy plugin
proxyPlugin.addProxyExtensionHook("mvp.KICK_FROM_ROOM", KickPlayerProxyExtHook())
proxyPlugin.addProxyExtensionHook("proxy.INSTANCE_ENTRY", InstanceEntryProxyHook())
def getDBProperty(accountId, property):
    """Look up a single profile property for an account in the web database.

    @param accountId account id (integer) whose profile row is queried
    @param property  property name; it is interpolated directly into the SQL,
        so it must only ever be a trusted literal (e.g. "Admin", "PhotoURL"),
        never user input
    @return the stored value as a string, or a fallback when missing or on error
        ("Unknown", or a derived photo URL for "PhotoURL")
    """
    value = None
    con = None
    stm = None
    srs = None
    try:
        url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
        sql = "SELECT value FROM profile WHERE account_id = %d AND property = '%s'" % (accountId, property)
        con = DriverManager.getConnection(url)
        stm = con.createStatement()
        srs = stm.executeQuery(sql)
        # last row wins (the property is expected to be unique per account)
        while srs.next():
            value = srs.getString(1)
    except:
        # best effort: fall through to the default value below
        Log.debug("getDBProperty(): Exception")
    finally:
        # always release JDBC resources, even when the query raised
        # (the original leaked con/stm/srs on any exception)
        for resource in (srs, stm, con):
            if resource is not None:
                try:
                    resource.close()
                except:
                    pass
    if value is None:
        if property == "PhotoURL":
            # default photo URL is derived from the account id
            value = places_url + "photos/%08d.jpg" % accountId
        else:
            value = "Unknown"
    Log.debug("getDBProperty(): accountId=%d, property=%s, value=%s" % (accountId, property, value))
    return value
#
# Simple test to see if the player is an admin.
#
def isAdmin(accountId):
    """Return True exactly when the account's "Admin" profile property is the string "True"."""
    return getDBProperty(accountId, "Admin") == "True"
def getFriendlist(accountId):
    """Return up to 12 friend account ids (as strings) for the given account.

    Falls back to a single placeholder entry when the database is unreachable.
    @param accountId account id (integer) whose friends are listed
    @return LinkedList of friend account ids, each formatted as a string
    """
    friendList = LinkedList()
    con = None
    stm = None
    srs = None
    try:
        url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
        sql = "SELECT friend_id FROM friends WHERE my_id = %d LIMIT 12" % accountId
        con = DriverManager.getConnection(url)
        stm = con.createStatement()
        srs = stm.executeQuery(sql)
        while srs.next():
            friendList.add(str(srs.getInt(1)))
    except:
        # best-effort fallback; keep the element type consistent with the
        # normal path (the original appended an int here but strings above)
        friendList.add("1")
    finally:
        # always release JDBC resources (the original leaked them on error)
        for resource in (srs, stm, con):
            if resource is not None:
                try:
                    resource.close()
                except:
                    pass
    return friendList
def getPlaylist(accountId):
    """Return the account's audio playlist as (name, URL) pairs.

    @param accountId account id (integer) whose media rows are queried
    @return LinkedList of two-element LinkedLists: [track name, track URL]
    """
    playList = LinkedList()
    con = None
    stm = None
    srs = None
    try:
        # use a distinct name for the JDBC url; the original rebound `url`
        # inside the loop, shadowing the connection string
        dbUrl = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
        # media_type=1 selects audio entries
        sql = "SELECT name, URL FROM media WHERE account_id = %d AND media_type=1" % accountId
        con = DriverManager.getConnection(dbUrl)
        stm = con.createStatement()
        srs = stm.executeQuery(sql)
        while srs.next():
            nvpair = LinkedList()
            nvpair.add(srs.getString(1))
            nvpair.add(srs.getString(2))
            playList.add(nvpair)
    except:
        # best-effort fallback playlist when the database is unreachable
        nvpair = LinkedList()
        nvpair.add("Slick Rick 1")
        nvpair.add("http://www.tradebit.com/usr/scheme05/pub/8/Chamillionaire-feat.-Slick-Rick---Hip-Hop-Police.mp3")
        playList.add(nvpair)
    finally:
        # always release JDBC resources (the original leaked them on error)
        for resource in (srs, stm, con):
            if resource is not None:
                try:
                    resource.close()
                except:
                    pass
    return playList
class GetPropertyProxyExtHook (ProxyExtensionHook):
    """Handles "mv.GET_PROPERTY": copies server-side data (profile properties,
    friend list, playlist, room style/owner) onto a world-manager object so the
    client can read it."""
    def processExtensionEvent(self, event, player, proxy):
        """Resolve target oid/accountId from the event, then dispatch on 'cmd'.

        Event properties (all optional): 'oid' (defaults to the requesting
        player), 'account_id' (defaults to the object's AccountId, falling back
        to roomOwnerId), 'property_name' (defaults to "PhotoURL"), 'cmd'.
        """
        props = event.getPropertyMap()
        oid = None
        if props.containsKey("oid"):
            oid = props["oid"]
        else:
            oid = player.getOid()
        accountId = None
        if props.containsKey("account_id"):
            accountId = props["account_id"]
        else:
            accountId = EnginePlugin.getObjectProperty(oid, Namespace.WORLD_MANAGER, "AccountId")
            if accountId is None:
                # room objects carry the owner's account under roomOwnerId
                accountId = EnginePlugin.getObjectProperty(oid, Namespace.WORLD_MANAGER, "roomOwnerId")
        propKey = None
        if props.containsKey("property_name"):
            propKey = props["property_name"]
        else:
            propKey = "PhotoURL"
        cmd = None
        if props.containsKey("cmd"):
            cmd = props["cmd"]
        if (accountId is not None) and (oid is not None):
            if (cmd == "property"):
                # copy a single database-backed profile property onto the object
                propValue = getDBProperty(accountId, propKey)
                EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, propKey, propValue)
            if (cmd == "friendlist"):
                friend_list = getFriendlist(accountId)
                EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "friendlist", friend_list)
            if (cmd == "playlist"):
                play_list = getPlaylist(accountId)
                EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "playlist", play_list)
            if (cmd == "roomstyle"):
                # room style lives on the current instance object
                room_style = EnginePlugin.getObjectProperty(Instance.currentOid(), Namespace.INSTANCE, "RoomStyle")
                EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "roomstyle", room_style)
            if (cmd == "room_owner_id"):
                roomOwnerId = EnginePlugin.getObjectProperty(Instance.currentOid(), Namespace.INSTANCE, "AccountId")
                EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "roomOwnerId", roomOwnerId)
proxyPlugin.addProxyExtensionHook("mv.GET_PROPERTY", GetPropertyProxyExtHook())
class UpdateObjectProxyExtHook (ProxyExtensionHook):
    """Handles "mv.UPDATE_OBJECT": either snaps the player into a sit pose
    (when 'transition', 'idle' and 'loc_start' are supplied) or sets an
    arbitrary world-manager property (when 'property'/'value' are supplied)."""
    def processExtensionEvent(self, event, player, proxy):
        """Apply a sit/animation update and/or a property update from the event map."""
        props = event.getPropertyMap()
        dir = None
        if props.containsKey("dir"):
            dir = props["dir"]
        transition = None
        if props.containsKey("transition"):
            transition = props["transition"]
        idle = None
        if props.containsKey("idle"):
            idle = props["idle"]
        loc_start = None
        if props.containsKey("loc_start"):
            loc_start = props["loc_start"]
        if (transition is not None) and (idle is not None) and (loc_start is not None):
            # move the player to the sit location/orientation, play the
            # transition animation, then record the idle pose and sit state
            wnode_start = BasicWorldNode()
            wnode_start.setLoc(Point(loc_start))
            wnode_start.setOrientation(dir)
            playerOid = player.getOid()
            WorldManagerClient.updateWorldNode(playerOid, wnode_start, True)
            AnimationClient.playSingleAnimation(playerOid, transition)
            # NOTE: end-of-transition reposition is disabled
            # wnode_end = BasicWorldNode()
            # wnode_end.setLoc(Point(loc_end))
            # wnode_end.setOrientation(dir)
            # WorldManagerClient.updateWorldNode(playerOid, wnode_end, True)
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitidle", idle)
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(True))
        if props.containsKey("property"):
            # generic property setter: requires 'oid', 'property' and 'value'
            oid = None
            if props.containsKey("oid"):
                oid = props["oid"]
            property = props["property"]
            value = None
            if props.containsKey("value"):
                value = props["value"]
            if (oid is not None) and (property is not None) and (value is not None):
                EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, property, value)
proxyPlugin.addProxyExtensionHook("mv.UPDATE_OBJECT", UpdateObjectProxyExtHook())
class PlacesLoginCallback (ProxyLoginCallback):
    """Login lifecycle callback: stamps admin status at load time and room
    ownership data at spawn time."""
    def preLoad(self, player, conn):
        """No pre-load work needed."""
        pass
    def postLoad(self, player, conn):
        """After the player object loads, publish the account's admin flag."""
        #
        # setting "isAdmin" on the player object will let us appropriately
        # update UI elements on the client where only admins should be able
        # to perform an operation - note that this should only be used for
        # UI, not to determine permission to perform an operation - admin
        # requests should *ALWAYS* be confirmed on the world server
        #
        playerOid = player.getOid()
        accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
        EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "isAdmin", isAdmin(accountId))
    def postSpawn(self, player, conn):
        """After spawn, if the instance is a room ("room-*"), refresh profile
        photos and record the room owner's account id on the player."""
        Log.debug("[CYC] postSpawn")
        playerOid = player.getOid()
        pWNode = WorldManagerClient.getWorldNode(playerOid)
        instanceOid = pWNode.getInstanceOid()
        iInfo = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_NAME)
        instanceName = iInfo.name
        if instanceName.find("room-") == 0:
            setProfilePhotos(instanceOid)
            roomOwnerId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
            EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "roomOwnerId", roomOwnerId)
proxyPlugin.setProxyLoginCallback(PlacesLoginCallback())
def generateToken(props=None):
    """Create a domain secure token valid for TOKEN_LIFE ms, optionally carrying extra properties."""
    expiresAt = System.currentTimeMillis() + TOKEN_LIFE
    if props is None:
        spec = SecureTokenSpec(SecureTokenSpec.TOKEN_TYPE_DOMAIN, AGENT_NAME, expiresAt)
    else:
        spec = SecureTokenSpec(SecureTokenSpec.TOKEN_TYPE_DOMAIN, AGENT_NAME, expiresAt, props)
    return SecureTokenManager.getInstance().generateToken(spec)
class GenerateTokenProxyExtHook (ProxyExtensionHook):
    """Handles "mvp.GENERATE_TOKEN": mints a secure token bound to the player's
    account and sends it back for the client to embed in a web (JSP) request."""
    def processExtensionEvent(self, event, player, proxy):
        """Validate the request, generate a URL-safe token, and reply with
        "mvp.TOKEN_GENERATED" carrying the augmented jspArgs and frameName."""
        playerOid = player.getOid()
        eventProps = event.getPropertyMap()
        # both keys are required to route the reply back into the client UI
        if not 'frameName' in eventProps or not 'jspArgs' in eventProps:
            WorldManagerClient.sendObjChatMsg(playerOid, 0, "GTPExtHook request failed: Bad data passed to server.")
            return
        # get player's accountId
        accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
        Log.debug("GenerateTokenHook: token requested by playerOid=%d, accountId=%d" % (playerOid, accountId))
        props = HashMap()
        props.put("accountId", accountId)
        token = generateToken(props=props)
        # URL-safe base64 so the token can travel as a query parameter
        token64 = Base64.encodeBytes(token, Base64.URL_SAFE)
        Log.debug("GenerateTokenHook: token64 = %s" % token64)
        msg = WorldManagerClient.TargetedExtensionMessage("mvp.TOKEN_GENERATED", playerOid)
        msgProps = msg.getPropertyMapRef()
        # need to send these back to the client
        jspArgs = eventProps['jspArgs']
        jspArgs = "%s&host=%s&token=%s" % (jspArgs, domainHostName, token64)
        msgProps.put("jspArgs", jspArgs)
        msgProps.put("frameName", eventProps['frameName'])
        Engine.getAgent().sendBroadcast(msg)
proxyPlugin.addProxyExtensionHook("mvp.GENERATE_TOKEN", GenerateTokenProxyExtHook())
| mit |
resba/gnuradio | grc/base/FlowGraph.py | 28 | 7583 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from . import odict
from Element import Element
from .. gui import Messages
class FlowGraph(Element):
    """A GRC flow graph: the collection of blocks plus the connections between
    their ports, including the special 'options' block. Python 2 code."""
    def __init__(self, platform):
        """
        Make a flow graph from the arguments.
        @param platform a platform with blocks and constructors
        @return the flow graph object
        """
        #initialize
        Element.__init__(self, platform)
        #initial blank import (creates the options block and an empty element list)
        self.import_data()
    def _get_unique_id(self, base_id=''):
        """
        Get a unique id starting with the base id.
        @param base_id the id starts with this and appends a count
        @return a unique id
        """
        index = 0
        while True:
            id = '%s_%d'%(base_id, index)
            index = index + 1
            #make sure that the id is not used by another block
            if not filter(lambda b: b.get_id() == id, self.get_blocks()): return id
    def __str__(self): return 'FlowGraph - %s(%s)'%(self.get_option('title'), self.get_option('id'))
    def get_option(self, key):
        """
        Get the option for a given key.
        The option comes from the special options block.
        @param key the param key for the options block
        @return the value held by that param
        """
        return self._options_block.get_param(key).get_evaluated()
    def is_flow_graph(self): return True
    ##############################################
    ## Access Elements
    ##############################################
    def get_block(self, id): return filter(lambda b: b.get_id() == id, self.get_blocks())[0]
    def get_blocks(self): return filter(lambda e: e.is_block(), self.get_elements())
    def get_connections(self): return filter(lambda e: e.is_connection(), self.get_elements())
    def get_children(self): return self.get_elements()
    def get_elements(self):
        """
        Get a list of all the elements.
        Always ensure that the options block is in the list (only once).
        @return the element list
        """
        options_block_count = self._elements.count(self._options_block)
        if not options_block_count:
            self._elements.append(self._options_block)
        #remove any duplicate options blocks, keeping exactly one
        for i in range(options_block_count-1):
            self._elements.remove(self._options_block)
        return self._elements
    def get_enabled_blocks(self):
        """
        Get a list of all blocks that are enabled.
        @return a list of blocks
        """
        return filter(lambda b: b.get_enabled(), self.get_blocks())
    def get_enabled_connections(self):
        """
        Get a list of all connections that are enabled.
        @return a list of connections
        """
        return filter(lambda c: c.get_enabled(), self.get_connections())
    def get_new_block(self, key):
        """
        Get a new block of the specified key.
        Add the block to the list of elements.
        @param key the block key
        @return the new block or None if not found
        """
        if key not in self.get_parent().get_block_keys(): return None
        block = self.get_parent().get_new_block(self, key)
        self.get_elements().append(block)
        return block
    def connect(self, porta, portb):
        """
        Create a connection between porta and portb.
        @param porta a port
        @param portb another port
        @throw Exception bad connection
        @return the new connection
        """
        connection = self.get_parent().Connection(flow_graph=self, porta=porta, portb=portb)
        self.get_elements().append(connection)
        return connection
    def remove_element(self, element):
        """
        Remove the element from the list of elements.
        If the element is a port, remove the whole block.
        If the element is a block, remove its connections.
        If the element is a connection, just remove the connection.
        """
        if element not in self.get_elements(): return
        #found a port, set to parent signal block
        if element.is_port():
            element = element.get_parent()
        #remove block, remove all involved connections
        if element.is_block():
            for port in element.get_ports():
                map(self.remove_element, port.get_connections())
        self.get_elements().remove(element)
    def evaluate(self, expr):
        """
        Evaluate the expression.
        @param expr the string expression
        @throw NotImplementedError subclasses must provide the evaluator
        """
        raise NotImplementedError
    ##############################################
    ## Import/Export Methods
    ##############################################
    def export_data(self):
        """
        Export this flow graph to nested data.
        Export all block and connection data.
        @return a nested data odict
        """
        import time
        n = odict()
        n['timestamp'] = time.ctime()
        n['block'] = [block.export_data() for block in self.get_blocks()]
        n['connection'] = [connection.export_data() for connection in self.get_connections()]
        return odict({'flow_graph': n})
    def import_data(self, n=None):
        """
        Import blocks and connections into this flow graph.
        Clear this flowgraph of all previous blocks and connections.
        Any blocks or connections in error will be ignored.
        @param n the nested data odict
        """
        #remove previous elements
        self._elements = list()
        #use blank data if none provided
        fg_n = n and n.find('flow_graph') or odict()
        blocks_n = fg_n.findall('block')
        connections_n = fg_n.findall('connection')
        #create option block
        self._options_block = self.get_parent().get_new_block(self, 'options')
        #build the blocks
        for block_n in blocks_n:
            key = block_n.find('key')
            if key == 'options': block = self._options_block
            else: block = self.get_new_block(key)
            #only load the block when the block key was valid
            if block: block.import_data(block_n)
            else: Messages.send_error_load('Block key "%s" not found in %s'%(key, self.get_parent()))
        #build the connections
        for connection_n in connections_n:
            #try to make the connection
            try:
                #get the block ids
                source_block_id = connection_n.find('source_block_id')
                sink_block_id = connection_n.find('sink_block_id')
                #get the port keys
                source_key = connection_n.find('source_key')
                sink_key = connection_n.find('sink_key')
                #verify the blocks
                block_ids = map(lambda b: b.get_id(), self.get_blocks())
                if source_block_id not in block_ids:
                    raise LookupError('source block id "%s" not in block ids'%source_block_id)
                if sink_block_id not in block_ids:
                    raise LookupError('sink block id "%s" not in block ids'%sink_block_id)
                #get the blocks
                source_block = self.get_block(source_block_id)
                sink_block = self.get_block(sink_block_id)
                #verify the ports
                if source_key not in source_block.get_source_keys():
                    raise LookupError('source key "%s" not in source block keys'%source_key)
                if sink_key not in sink_block.get_sink_keys():
                    raise LookupError('sink key "%s" not in sink block keys'%sink_key)
                #get the ports
                source = source_block.get_source(source_key)
                sink = sink_block.get_sink(sink_key)
                #build the connection
                self.connect(source, sink)
            except LookupError, e: Messages.send_error_load(
                'Connection between %s(%s) and %s(%s) could not be made.\n\t%s'%(
                    source_block_id, source_key, sink_block_id, sink_key, e
                )
            )
        self.rewrite() #global rewrite
| gpl-3.0 |
cwilkes/event_store_meta | tests/test_models.py | 1 | 1689 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from event_store_meta.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
    """Unit tests for the User model: persistence, defaults, passwords, roles."""
    def test_get_by_id(self):
        """A saved user can be fetched back by primary key."""
        user = User('foo', 'foo@bar.com')
        user.save()
        retrieved = User.get_by_id(user.id)
        assert retrieved == user
    def test_created_at_defaults_to_datetime(self):
        """created_at is populated automatically with a datetime on save."""
        user = User(username='foo', email='foo@bar.com')
        user.save()
        assert bool(user.created_at)
        assert isinstance(user.created_at, dt.datetime)
    def test_password_is_nullable(self):
        """A user created without a password stores None, not an empty hash."""
        user = User(username='foo', email='foo@bar.com')
        user.save()
        assert user.password is None
    def test_factory(self, db):
        """The UserFactory produces a valid, active, non-admin user."""
        user = UserFactory(password="myprecious")
        db.session.commit()
        assert bool(user.username)
        assert bool(user.email)
        assert bool(user.created_at)
        assert user.is_admin is False
        assert user.active is True
        assert user.check_password('myprecious')
    def test_check_password(self):
        """check_password accepts only the original plaintext."""
        user = User.create(username="foo", email="foo@bar.com",
                           password="foobarbaz123")
        assert user.check_password('foobarbaz123') is True
        assert user.check_password("barfoobaz") is False
    def test_full_name(self):
        """full_name joins first and last name with a space."""
        user = UserFactory(first_name="Foo", last_name="Bar")
        assert user.full_name == "Foo Bar"
    def test_roles(self):
        """Roles appended to user.roles are persisted with the user."""
        role = Role(name='admin')
        role.save()
        u = UserFactory()
        u.roles.append(role)
        u.save()
        assert role in u.roles
| bsd-3-clause |
pv/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target

h = .02  # step size in the mesh

# Create color maps: light colors for decision regions, bold for samples
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

# Fit one classifier without shrinkage and one with a shrink threshold to
# show how shrinkage changes the decision boundaries.
# (The unused `n_neighbors = 15` left over from the k-NN example this was
# adapted from has been removed.)
for shrinkage in [None, 0.1]:
    # we create an instance of the NearestCentroid classifier and fit the data
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # report training accuracy for this shrinkage setting
    print(shrinkage, np.mean(y == y_pred))

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.title("3-Class classification (shrink_threshold=%r)"
              % shrinkage)
    plt.axis('tight')

plt.show()
| bsd-3-clause |
xrmx/django | django/conf/locale/nb/formats.py | 504 | 1766 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Norwegian (nb) locale format definitions.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
hazrpg/calibre | src/calibre/devices/mtp/filesystem_cache.py | 14 | 8300 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import weakref, sys, json
from collections import deque
from operator import attrgetter
from future_builtins import map
from datetime import datetime
from calibre import human_readable, prints, force_unicode
from calibre.utils.date import local_tz, as_utc
from calibre.utils.icu import sort_key, lower
from calibre.ebooks import BOOK_EXTENSIONS
bexts = frozenset(BOOK_EXTENSIONS) - {'mbp', 'tan', 'rar', 'zip', 'xml'}
class FileOrFolder(object):
    """A single node (file, folder, or storage root) in the cached view of an
    MTP device filesystem. Storage roots are nodes whose object_id equals
    their storage_id. Python 2 code."""
    def __init__(self, entry, fs_cache):
        """Build a node from a raw device entry dict and register it in the
        owning FilesystemCache's id_map (held via weakref to avoid cycles)."""
        self.all_storage_ids = fs_cache.all_storage_ids
        self.object_id = entry['id']
        self.is_folder = entry['is_folder']
        self.storage_id = entry['storage_id']
        # self.parent_id is None for storage objects
        self.parent_id = entry.get('parent_id', None)
        n = entry.get('name', None)
        if not n: n = '___'
        self.name = force_unicode(n, 'utf-8')
        self.persistent_id = entry.get('persistent_id', self.object_id)
        self.size = entry.get('size', 0)
        # 'modified' may be a timestamp or a datetime-component tuple;
        # fall back to the epoch on any parse failure
        md = entry.get('modified', 0)
        try:
            if isinstance(md, tuple):
                self.last_modified = datetime(*(list(md)+[local_tz]))
            else:
                self.last_modified = datetime.fromtimestamp(md, local_tz)
        except:
            self.last_modified = datetime.fromtimestamp(0, local_tz)
        self.last_mod_string = self.last_modified.strftime('%Y/%m/%d %H:%M')
        self.last_modified = as_utc(self.last_modified)
        if self.storage_id not in self.all_storage_ids:
            raise ValueError('Storage id %s not valid for %s, valid values: %s'%(self.storage_id,
                entry, self.all_storage_ids))
        # a parent_id of 0 means "directly under the storage root"
        if self.parent_id == 0:
            self.parent_id = self.storage_id
        self.is_hidden = entry.get('is_hidden', False)
        self.is_system = entry.get('is_system', False)
        self.can_delete = entry.get('can_delete', True)
        self.files = []
        self.folders = []
        fs_cache.id_map[self.object_id] = self
        self.fs_cache = weakref.ref(fs_cache)
        self.deleted = False
        if self.storage_id == self.object_id:
            self.storage_prefix = 'mtp:::%s:::'%self.persistent_id
        # ebook detection by file extension (excluding archive/sidecar types)
        self.is_ebook = (not self.is_folder and
                self.name.rpartition('.')[-1].lower() in bexts)
    def __repr__(self):
        name = 'Folder' if self.is_folder else 'File'
        try:
            path = unicode(self.full_path)
        except:
            path = ''
        datum = 'size=%s'%(self.size)
        if self.is_folder:
            datum = 'children=%s'%(len(self.files) + len(self.folders))
        return '%s(id=%s, storage_id=%s, %s, path=%s, modified=%s)'%(name, self.object_id,
                self.storage_id, datum, path, self.last_mod_string)
    __str__ = __repr__
    __unicode__ = __repr__
    @property
    def empty(self):
        # True when this node has no children at all
        return not self.files and not self.folders
    @property
    def id_map(self):
        return self.fs_cache().id_map
    @property
    def parent(self):
        # storage roots have parent_id None and therefore no parent
        return None if self.parent_id is None else self.id_map[self.parent_id]
    @property
    def full_path(self):
        """Tuple of names from the storage root down to this node."""
        parts = deque()
        parts.append(self.name)
        p = self.parent
        while p is not None:
            parts.appendleft(p.name)
            p = p.parent
        return tuple(parts)
    def __iter__(self):
        # iterate folders first, then files
        for e in self.folders:
            yield e
        for e in self.files:
            yield e
    def add_child(self, entry):
        """Create a node for entry and attach it under this node."""
        ans = FileOrFolder(entry, self.fs_cache())
        t = self.folders if ans.is_folder else self.files
        t.append(ans)
        return ans
    def remove_child(self, entry):
        """Detach entry from this node, drop it from the id_map and mark it deleted."""
        for x in (self.files, self.folders):
            try:
                x.remove(entry)
            except ValueError:
                pass
        self.id_map.pop(entry.object_id, None)
        entry.deleted = True
    def dump(self, prefix='', out=sys.stdout):
        """Pretty-print this subtree ('+' folders, '-' files), sorted by name."""
        c = '+' if self.is_folder else '-'
        data = ('%s children'%(sum(map(len, (self.files, self.folders))))
            if self.is_folder else human_readable(self.size))
        data += ' modified=%s'%self.last_mod_string
        line = '%s%s %s [id:%s %s]'%(prefix, c, self.name, self.object_id, data)
        prints(line, file=out)
        for c in (self.folders, self.files):
            for e in sorted(c, key=lambda x:sort_key(x.name)):
                e.dump(prefix=prefix+'  ', out=out)
    def folder_named(self, name):
        # case-insensitive child folder lookup
        name = lower(name)
        for e in self.folders:
            if e.name and lower(e.name) == name:
                return e
        return None
    def file_named(self, name):
        # case-insensitive child file lookup
        name = lower(name)
        for e in self.files:
            if e.name and lower(e.name) == name:
                return e
        return None
    def find_path(self, path):
        '''
        Find a path in this folder, where path is a
        tuple of folder and file names like ('eBooks', 'newest',
        'calibre.epub'). Finding is case-insensitive.
        '''
        parent = self
        components = list(path)
        while components:
            child = components[0]
            components = components[1:]
            c = parent.folder_named(child)
            if c is None:
                c = parent.file_named(child)
            if c is None:
                return None
            parent = c
        return parent
    @property
    def mtp_relpath(self):
        # lower-cased path relative to the storage root (root name dropped)
        return tuple(x.lower() for x in self.full_path[1:])
    @property
    def mtp_id_path(self):
        # stable identifier string: 'mtp:::<object_id json>:::<full path>'
        return 'mtp:::' + json.dumps(self.object_id) + ':::' + '/'.join(self.full_path)
class FilesystemCache(object):
    """In-memory tree of an MTP device's storages and their files/folders,
    built from flat entry dicts. Python 2 code."""
    def __init__(self, all_storage, entries):
        """Create storage-root nodes from all_storage, then all file/folder
        nodes from entries, linking each node under its parent. Orphans
        (missing parent) are re-parented onto a storage root."""
        self.entries = []
        self.id_map = {}
        self.all_storage_ids = tuple(x['id'] for x in all_storage)
        for storage in all_storage:
            storage['storage_id'] = storage['id']
            e = FileOrFolder(storage, self)
            self.entries.append(e)
        self.entries.sort(key=attrgetter('object_id'))
        all_storage_ids = [x.storage_id for x in self.entries]
        self.all_storage_ids = tuple(all_storage_ids)
        for entry in entries:
            FileOrFolder(entry, self)
        for item in self.id_map.itervalues():
            try:
                p = item.parent
            except KeyError:
                # Parent does not exist, set the parent to be the storage
                # object
                sid = item.storage_id
                if sid not in all_storage_ids:
                    sid = all_storage_ids[0]
                item.parent_id = sid
                p = item.parent
            if p is not None:
                t = p.folders if item.is_folder else p.files
                t.append(item)
    def dump(self, out=sys.stdout):
        """Pretty-print every storage tree."""
        for e in self.entries:
            e.dump(out=out)
    def storage(self, storage_id):
        """Return the storage-root node with the given id, or None implicitly."""
        for e in self.entries:
            if e.storage_id == storage_id:
                return e
    def iterebooks(self, storage_id):
        """Yield all ebook files on a storage, skipping .txt files at its root."""
        for x in self.id_map.itervalues():
            if x.storage_id == storage_id and x.is_ebook:
                if x.parent_id == storage_id and x.name.lower().endswith('.txt'):
                    continue # Ignore .txt files in the root
                yield x
    def __len__(self):
        return len(self.id_map)
    def resolve_mtp_id_path(self, path):
        """Map an 'mtp:::<id>:::<path>' string back to its cached node.

        Raises ValueError for malformed paths or unknown object ids."""
        if not path.startswith('mtp:::'):
            raise ValueError('%s is not a valid MTP path'%path)
        parts = path.split(':::')
        if len(parts) < 3:
            raise ValueError('%s is not a valid MTP path'%path)
        try:
            object_id = json.loads(parts[1])
        except:
            raise ValueError('%s is not a valid MTP path'%path)
        try:
            return self.id_map[object_id]
        except KeyError:
            raise ValueError('No object found with MTP path: %s'%path)
| gpl-3.0 |
Shrhawk/edx-platform | common/lib/capa/capa/capa_problem.py | 10 | 36556 | #
# File: capa/capa_problem.py
#
# Nomenclature:
#
# A capa Problem is a collection of text and capa Response questions.
# Each Response may have one or more Input entry fields.
# The capa problem may include a solution.
#
"""
Main module which shows problems (of "capa" type).
This is used by capa_module.
"""
from copy import deepcopy
from datetime import datetime
import logging
import os.path
import re
from lxml import etree
from pytz import UTC
from xml.sax.saxutils import unescape
from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
import capa.responsetypes as responsetypes
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface
from capa.safe_exec import safe_exec
# extra things displayed after "show answers" is pressed
solution_tags = ['solution']
# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML
# (maps source tag name -> replacement HTML tag)
html_transforms = {
    'problem': {'tag': 'div'},
    'text': {'tag': 'span'},
    'math': {'tag': 'span'},
}
# These should be removed from HTML output, including all subelements
html_problem_semantics = [
    "codeparam",
    "responseparam",
    "answer",
    "script",
    "hintgroup",
    "openendedparam",
    "openendedrubric",
]
# module-level logger for this file
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# main class for this module
class LoncapaSystem(object):
    """
    An encapsulation of resources needed from the outside.

    These interfaces are collected here so that a caller of LoncapaProblem
    can provide these resources however make sense for their environment, and
    this code can remain independent.

    Attributes:
        i18n: an object implementing the `gettext.Translations` interface so
            that we can use `.ugettext` to localize strings.

    See :class:`ModuleSystem` for documentation of other attributes.
    """
    def __init__(  # pylint: disable=invalid-name
        self,
        ajax_url,
        anonymous_student_id,
        cache,
        can_execute_unsafe_code,
        get_python_lib_zip,
        DEBUG,              # pylint: disable=invalid-name
        filestore,
        i18n,
        node_path,
        render_template,
        seed,      # Why do we do this if we have self.seed?
        STATIC_URL,  # pylint: disable=invalid-name
        xqueue,
        matlab_api_key=None  # optional; only needed for matlab input types
    ):
        # plain attribute copies; no validation is performed here
        self.ajax_url = ajax_url
        self.anonymous_student_id = anonymous_student_id
        self.cache = cache
        self.can_execute_unsafe_code = can_execute_unsafe_code
        self.get_python_lib_zip = get_python_lib_zip
        self.DEBUG = DEBUG                              # pylint: disable=invalid-name
        self.filestore = filestore
        self.i18n = i18n
        self.node_path = node_path
        self.render_template = render_template
        self.seed = seed                     # Why do we do this if we have self.seed?
        self.STATIC_URL = STATIC_URL                    # pylint: disable=invalid-name
        self.xqueue = xqueue
        self.matlab_api_key = matlab_api_key
class LoncapaProblem(object):
"""
Main class for capa Problems.
"""
    def __init__(self, problem_text, id, capa_system, capa_module,  # pylint: disable=redefined-builtin
                 state=None, seed=None):
        """
        Initializes capa Problem.

        Arguments:
            problem_text (string): xml defining the problem.
            id (string): identifier for this problem, often a filename (no spaces).
            capa_system (LoncapaSystem): LoncapaSystem instance which provides OS,
                rendering, user context, and other resources.
            capa_module: instance needed to access runtime/logging
            state (dict): containing the following keys:
                - `seed` (int) random number generator seed
                - `student_answers` (dict) maps input id to the stored answer for that input
                - `correct_map` (CorrectMap) a map of each input to their 'correctness'
                - `done` (bool) indicates whether or not this problem is considered done
                - `input_state` (dict) maps input_id to a dictionary that holds the state for that input
            seed (int): random number generator seed.

        Note: the steps below are order-sensitive (reset, seed, text rewrite,
        parse, includes, script context, preprocess, render) -- each stage
        depends on the previous one's side effects.
        """
        ## Initialize class variables from state
        self.do_reset()
        self.problem_id = id
        self.capa_system = capa_system
        self.capa_module = capa_module
        state = state or {}
        # Set seed according to the following priority:
        #   1. Contained in problem's state
        #   2. Passed into capa_problem via constructor
        self.seed = state.get('seed', seed)
        assert self.seed is not None, "Seed must be provided for LoncapaProblem."
        self.student_answers = state.get('student_answers', {})
        if 'correct_map' in state:
            self.correct_map.set_dict(state['correct_map'])
        self.done = state.get('done', False)
        self.input_state = state.get('input_state', {})
        # Convert startouttext and endouttext to proper <text></text>
        problem_text = re.sub(r"startouttext\s*/", "text", problem_text)
        problem_text = re.sub(r"endouttext\s*/", "/text", problem_text)
        self.problem_text = problem_text
        # parse problem XML file into an element tree
        self.tree = etree.XML(problem_text)
        self.make_xml_compatible(self.tree)
        # handle any <include file="foo"> tags
        self._process_includes()
        # construct script processor context (eg for customresponse problems)
        self.context = self._extract_context(self.tree)
        # Pre-parse the XML tree: modifies it to add ID's and perform some in-place
        # transformations. This also creates the dict (self.responders) of Response
        # instances for each question in the problem. The dict has keys = xml subtree of
        # Response, values = Response instance
        self._preprocess_problem(self.tree)
        if not self.student_answers:  # True when student_answers is an empty dict
            self.set_initial_display()
        # dictionary of InputType objects associated with this problem
        #   input_id string -> InputType object
        self.inputs = {}
        # Run response late_transforms last (see MultipleChoiceResponse)
        # Sort the responses to be in *_1 *_2 ... order.
        responses = self.responders.values()
        # resp.id ends in "_<n>"; sort numerically on that suffix.
        responses = sorted(responses, key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:]))
        for response in responses:
            if hasattr(response, 'late_transforms'):
                response.late_transforms(self)
        # Cache the fully rendered XHTML tree for get_html().
        self.extracted_tree = self._extract_html(self.tree)
    def make_xml_compatible(self, tree):
        """
        Adjust tree xml in-place for compatibility before creating
        a problem from it.

        The idea here is to provide a central point for XML translation,
        for example, supporting an old XML format. At present, there just two translations.

        1. <additional_answer> compatibility translation:
           old: <additional_answer>ANSWER</additional_answer>
           convert to
           new: <additional_answer answer="ANSWER">OPTIONAL-HINT</addional_answer>

        2. <optioninput> compatibility translation:
           optioninput works like this internally:
               <optioninput options="('yellow','blue','green')" correct="blue" />
           With extended hints there is a new <option> tag, like this
               <option correct="True">blue <optionhint>sky color</optionhint> </option>
           This translation takes in the new format and synthesizes the old option= attribute
           so all downstream logic works unchanged with the new <option> tag format.
        """
        additionals = tree.xpath('//stringresponse/additional_answer')
        for additional in additionals:
            answer = additional.get('answer')
            text = additional.text
            if not answer and text:  # trigger of old->new conversion
                # Move the element text into the answer= attribute; any
                # remaining children (hints) stay in place.
                additional.set('answer', text)
                additional.text = ''
        for optioninput in tree.xpath('//optioninput'):
            correct_option = None
            child_options = []
            for option_element in optioninput.findall('./option'):
                option_name = option_element.text.strip()
                # NOTE(review): .get('correct') returns None when the attribute
                # is absent, which would raise AttributeError here -- assumes
                # every <option> carries correct= (TODO confirm with schema).
                if option_element.get('correct').upper() == 'TRUE':
                    correct_option = option_name
                # Quote each option name for the "('a','b')" legacy format.
                child_options.append("'" + option_name + "'")
            if len(child_options) > 0:
                options_string = '(' + ','.join(child_options) + ')'
                optioninput.attrib.update({'options': options_string})
                if correct_option:
                    optioninput.attrib.update({'correct': correct_option})
def do_reset(self):
"""
Reset internal state to unfinished, with no answers
"""
self.student_answers = dict()
self.correct_map = CorrectMap()
self.done = False
def set_initial_display(self):
"""
Set the student's answers to the responders' initial displays, if specified.
"""
initial_answers = dict()
for responder in self.responders.values():
if hasattr(responder, 'get_initial_display'):
initial_answers.update(responder.get_initial_display())
self.student_answers = initial_answers
def __unicode__(self):
return u"LoncapaProblem ({0})".format(self.problem_id)
def get_state(self):
"""
Stored per-user session data neeeded to:
1) Recreate the problem
2) Populate any student answers.
"""
return {'seed': self.seed,
'student_answers': self.student_answers,
'correct_map': self.correct_map.get_dict(),
'input_state': self.input_state,
'done': self.done}
def get_max_score(self):
"""
Return the maximum score for this problem.
"""
maxscore = 0
for responder in self.responders.values():
maxscore += responder.get_max_score()
return maxscore
def get_score(self):
"""
Compute score for this problem. The score is the number of points awarded.
Returns a dictionary {'score': integer, from 0 to get_max_score(),
'total': get_max_score()}.
"""
correct = 0
for key in self.correct_map:
try:
correct += self.correct_map.get_npoints(key)
except Exception:
log.error('key=%s, correct_map = %s', key, self.correct_map)
raise
if (not self.student_answers) or len(self.student_answers) == 0:
return {'score': 0,
'total': self.get_max_score()}
else:
return {'score': correct,
'total': self.get_max_score()}
    def update_score(self, score_msg, queuekey):
        """
        Deliver grading response (e.g. from async code checking) to
        the specific ResponseType that requested grading

        Returns an updated CorrectMap
        """
        # Work on a copy of the current map so responders mutate the copy,
        # then fold the result back into self.correct_map below.
        cmap = CorrectMap()
        cmap.update(self.correct_map)
        for responder in self.responders.values():
            if hasattr(responder, 'update_score'):
                # Each LoncapaResponse will update its specific entries in cmap
                #   cmap is passed by reference
                responder.update_score(score_msg, cmap, queuekey)
        # Persist the merged results into this problem's own map.
        self.correct_map.set_dict(cmap.get_dict())
        return cmap
def ungraded_response(self, xqueue_msg, queuekey):
"""
Handle any responses from the xqueue that do not contain grades
Will try to pass the queue message to all inputtypes that can handle ungraded responses
Does not return any value
"""
# check against each inputtype
for the_input in self.inputs.values():
# if the input type has an ungraded function, pass in the values
if hasattr(the_input, 'ungraded_response'):
the_input.ungraded_response(xqueue_msg, queuekey)
def is_queued(self):
"""
Returns True if any part of the problem has been submitted to an external queue
(e.g. for grading.)
"""
return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)
    def get_recentmost_queuetime(self):
        """
        Returns a DateTime object that represents the timestamp of the most recent
        queueing request, or None if not queued
        """
        if not self.is_queued():
            return None
        # Get a list of timestamps of all queueing requests, then convert it to a DateTime object
        queuetime_strs = [
            self.correct_map.get_queuetime_str(answer_id)
            for answer_id in self.correct_map
            if self.correct_map.is_queued(answer_id)
        ]
        # Parse each timestamp with the xqueue wire format and attach UTC.
        # NOTE(review): assumes queue timestamps are recorded in UTC -- confirm
        # against xqueue_interface.dateformat producers.
        queuetimes = [
            datetime.strptime(qt_str, xqueue_interface.dateformat).replace(tzinfo=UTC)
            for qt_str in queuetime_strs
        ]
        # Non-empty by construction: is_queued() guaranteed at least one entry.
        return max(queuetimes)
def grade_answers(self, answers):
"""
Grade student responses. Called by capa_module.check_problem.
`answers` is a dict of all the entries from request.POST, but with the first part
of each key removed (the string before the first "_").
Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
Calls the Response for each question in this problem, to do the actual grading.
"""
# if answers include File objects, convert them to filenames.
self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
def supports_rescoring(self):
"""
Checks that the current problem definition permits rescoring.
More precisely, it checks that there are no response types in
the current problem that are not fully supported (yet) for rescoring.
This includes responsetypes for which the student's answer
is not properly stored in state, i.e. file submissions. At present,
we have no way to know if an existing response was actually a real
answer or merely the filename of a file submitted as an answer.
It turns out that because rescoring is a background task, limiting
it to responsetypes that don't support file submissions also means
that the responsetypes are synchronous. This is convenient as it
permits rescoring to be complete when the rescoring call returns.
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
def rescore_existing_answers(self):
"""
Rescore student responses. Called by capa_module.rescore_problem.
"""
return self._grade_answers(None)
    def _grade_answers(self, student_answers):
        """
        Internal grading call used for checking new 'student_answers' and also
        rescoring existing student_answers.

        For new student_answers being graded, `student_answers` is a dict of all the
        entries from request.POST, but with the first part of each key removed
        (the string before the first "_").  Thus, for example,
        input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.

        For rescoring, `student_answers` is None.

        Calls the Response for each question in this problem, to do the actual grading.
        """
        # old CorrectMap: passed to responders so they can consult prior state.
        oldcmap = self.correct_map
        # start new with empty CorrectMap; each responder's results are merged in.
        newcmap = CorrectMap()
        # Call each responsetype instance to do actual grading
        for responder in self.responders.values():
            # File objects are passed only if responsetype explicitly allows
            # for file submissions.  But we have no way of knowing if
            # student_answers contains a proper answer or the filename of
            # an earlier submission, so for now skip these entirely.
            # TODO: figure out where to get file submissions when rescoring.
            if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
                _ = self.capa_system.i18n.ugettext
                raise Exception(_(u"Cannot rescore problems with possible file submissions"))
            # use 'student_answers' only if it is provided, and if it might contain a file
            # submission that would not exist in the persisted "student_answers".
            if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
                results = responder.evaluate_answers(student_answers, oldcmap)
            else:
                results = responder.evaluate_answers(self.student_answers, oldcmap)
            newcmap.update(results)
        # Replace the whole map atomically once every responder has graded.
        self.correct_map = newcmap
        return newcmap
    def get_question_answers(self):
        """
        Returns a dict of answer_ids to answer values. If we cannot generate
        an answer (this sometimes happens in customresponses), that answer_id is
        not included. Called by "show answers" button JSON request
        (see capa_module)
        """
        # dict of (id, correct_answer); responder answers were computed once
        # in _preprocess_problem and cached in self.responder_answers.
        answer_map = dict()
        for response in self.responders.keys():
            results = self.responder_answers[response]
            answer_map.update(results)
        # include solutions from <solution>...</solution> stanzas
        for entry in self.tree.xpath("//" + "|//".join(solution_tags)):
            answer = etree.tostring(entry)
            if answer:
                # Expand any [[...]] context expressions in the solution body.
                answer_map[entry.get('id')] = contextualize_text(answer, self.context)
        log.debug('answer_map = %s', answer_map)
        return answer_map
def get_answer_ids(self):
"""
Return the IDs of all the responses -- these are the keys used for
the dicts returned by grade_answers and get_question_answers. (Though
get_question_answers may only return a subset of these.
"""
answer_ids = []
for response in self.responders.keys():
results = self.responder_answers[response]
answer_ids.append(results.keys())
return answer_ids
    def do_targeted_feedback(self, tree):
        """
        Implements targeted-feedback in-place on <multiplechoiceresponse> --
        choice-level explanations shown to a student after submission.

        Does nothing if there is no targeted-feedback attribute.

        Mutates ``tree``: prunes <targetedfeedback> nodes that do not match
        the student's selected choice, and (in "alwaysShowCorrectChoiceExplanation"
        mode) moves the correct choice's <solution> into the feedback set so it
        renders without "Show Answer".
        """
        # Note that the modifications has been done, avoiding problems if called twice.
        if hasattr(self, 'has_targeted'):
            return
        self.has_targeted = True  # pylint: disable=attribute-defined-outside-init
        for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):
            show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'
            # Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)
            choicegroup = mult_choice_response.xpath('./choicegroup[@type="MultipleChoice"]')[0]
            choices_list = list(choicegroup.iter('choice'))
            # Find the student answer key that matches our <choicegroup> id
            student_answer = self.student_answers.get(choicegroup.get('id'))
            expl_id_for_student_answer = None
            # Keep track of the explanation-id that corresponds to the student's answer
            # Also, keep track of the solution-id
            solution_id = None
            for choice in choices_list:
                if choice.get('name') == student_answer:
                    expl_id_for_student_answer = choice.get('explanation-id')
                if choice.get('correct') == 'true':
                    solution_id = choice.get('explanation-id')
            # Filter out targetedfeedback that doesn't correspond to the answer the student selected
            # Note: following-sibling will grab all following siblings, so we just want the first in the list
            targetedfeedbackset = mult_choice_response.xpath('./following-sibling::targetedfeedbackset')
            if len(targetedfeedbackset) != 0:
                targetedfeedbackset = targetedfeedbackset[0]
                targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback')
                for targetedfeedback in targetedfeedbacks:
                    # Don't show targeted feedback if the student hasn't answer the problem
                    # or if the target feedback doesn't match the student's (incorrect) answer
                    if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer:
                        targetedfeedbackset.remove(targetedfeedback)
            # Do not displace the solution under these circumstances
            if not show_explanation or not self.done:
                continue
            # The next element should either be <solution> or <solutionset>
            next_element = targetedfeedbackset.getnext()
            parent_element = tree
            solution_element = None
            if next_element is not None and next_element.tag == 'solution':
                solution_element = next_element
            elif next_element is not None and next_element.tag == 'solutionset':
                # Pick the solution whose explanation-id matches the correct choice.
                solutions = next_element.xpath('./solution')
                for solution in solutions:
                    if solution.get('explanation-id') == solution_id:
                        parent_element = next_element
                        solution_element = solution
            # If could not find the solution element, then skip the remaining steps below
            if solution_element is None:
                continue
            # Change our correct-choice explanation from a "solution explanation" to within
            # the set of targeted feedback, which means the explanation will render on the page
            # without the student clicking "Show Answer" or seeing a checkmark next to the correct choice
            parent_element.remove(solution_element)
            # Add our solution instead to the targetedfeedbackset and change its tag name
            solution_element.tag = 'targetedfeedback'
            targetedfeedbackset.append(solution_element)
def get_html(self):
"""
Main method called externally to get the HTML to be rendered for this capa Problem.
"""
self.do_targeted_feedback(self.tree)
html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
return html
def handle_input_ajax(self, data):
"""
InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
Also, parse out the dispatch from the get so that it can be passed onto the input type nicely
"""
# pull out the id
input_id = data['input_id']
if self.inputs[input_id]:
dispatch = data['dispatch']
return self.inputs[input_id].handle_ajax(dispatch, data)
else:
log.warning("Could not find matching input for id: %s", input_id)
return {}
@property
def has_responsive_ui(self):
"""
Returns whether this capa problem has support for responsive UI.
"""
return all(responder.has_responsive_ui for responder in self.responders.values())
# ======= Private Methods Below ========
    def _process_includes(self):
        """
        Handle any <include file="foo"> tags by reading in the specified file and inserting it
        into our XML tree.  Fail gracefully if debugging.

        Each <include> element is replaced in place by the parsed contents of
        its file.  With capa_system.DEBUG set, unreadable/unparseable includes
        are logged and skipped; otherwise the error propagates.
        """
        includes = self.tree.findall('.//include')
        for inc in includes:
            filename = inc.get('file')
            if filename is not None:
                try:
                    # open using LoncapaSystem OSFS filestore
                    ifp = self.capa_system.filestore.open(filename)
                except Exception as err:
                    log.warning(
                        'Error %s in problem xml include: %s',
                        err,
                        etree.tostring(inc, pretty_print=True)
                    )
                    log.warning(
                        'Cannot find file %s in %s', filename, self.capa_system.filestore
                    )
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): need real error handling, display to users
                    if not self.capa_system.DEBUG:
                        raise
                    else:
                        continue
                try:
                    # read in and convert to XML
                    incxml = etree.XML(ifp.read())
                except Exception as err:
                    log.warning(
                        'Error %s in problem xml include: %s',
                        err,
                        etree.tostring(inc, pretty_print=True)
                    )
                    log.warning('Cannot parse XML in %s', (filename))
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): same as above
                    if not self.capa_system.DEBUG:
                        raise
                    else:
                        continue
                # insert new XML into tree in place of include
                parent = inc.getparent()
                parent.insert(parent.index(inc), incxml)
                parent.remove(inc)
                log.debug('Included %s into %s', filename, self.problem_id)
def _extract_system_path(self, script):
"""
Extracts and normalizes additional paths for code execution.
For now, there's a default path of data/course/code; this may be removed
at some point.
script : ?? (TODO)
"""
DEFAULT_PATH = ['code']
# Separate paths by :, like the system path.
raw_path = script.get('system_path', '').split(":") + DEFAULT_PATH
# find additional comma-separated modules search path
path = []
for dir in raw_path:
if not dir:
continue
# path is an absolute path or a path relative to the data dir
dir = os.path.join(self.capa_system.filestore.root_path, dir)
# Check that we are within the filestore tree.
reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)
if ".." in reldir:
log.warning("Ignoring Python directory outside of course: %r", dir)
continue
abs_dir = os.path.normpath(dir)
path.append(abs_dir)
return path
def _extract_context(self, tree):
"""
Extract content of <script>...</script> from the problem.xml file, and exec it in the
context of this problem. Provides ability to randomize problems, and also set
variables for problem answer checking.
Problem XML goes to Python execution context. Runs everything in script tags.
"""
context = {}
context['seed'] = self.seed
context['anonymous_student_id'] = self.capa_system.anonymous_student_id
all_code = ''
python_path = []
for script in tree.findall('.//script'):
stype = script.get('type')
if stype:
if 'javascript' in stype:
continue # skip javascript
if 'perl' in stype:
continue # skip perl
# TODO: evaluate only python
for d in self._extract_system_path(script):
if d not in python_path and os.path.exists(d):
python_path.append(d)
XMLESC = {"'": "'", """: '"'}
code = unescape(script.text, XMLESC)
all_code += code
extra_files = []
if all_code:
# An asset named python_lib.zip can be imported by Python code.
zip_lib = self.capa_system.get_python_lib_zip()
if zip_lib is not None:
extra_files.append(("python_lib.zip", zip_lib))
python_path.append("python_lib.zip")
try:
safe_exec(
all_code,
context,
random_seed=self.seed,
python_path=python_path,
extra_files=extra_files,
cache=self.capa_system.cache,
slug=self.problem_id,
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
log.exception("Error while execing script code: " + all_code)
msg = "Error while executing script code: %s" % str(err).replace('<', '<')
raise responsetypes.LoncapaProblemError(msg)
# Store code source in context, along with the Python path needed to run it correctly.
context['script_code'] = all_code
context['python_path'] = python_path
context['extra_files'] = extra_files or None
return context
    def _extract_html(self, problemtree):  # private
        """
        Main (private) function which converts Problem XML tree to HTML.
        Calls itself recursively.

        Returns Element tree of XHTML representation of problemtree.
        Calls render_html of Response instances to render responses into XHTML.

        Used by get_html.

        Dispatch order matters: javascript scripts are kept verbatim, grading
        semantics are dropped, then input types / responses / custom renderers
        render themselves, and anything else is copied with children rendered
        recursively.
        """
        if not isinstance(problemtree.tag, basestring):
            # Comment and ProcessingInstruction nodes are not Elements,
            # and we're ok leaving those behind.
            # BTW: etree gives us no good way to distinguish these things
            # other than to examine .tag to see if it's a string. :(
            return
        if (problemtree.tag == 'script' and problemtree.get('type')
                and 'javascript' in problemtree.get('type')):
            # leave javascript intact.
            return deepcopy(problemtree)
        if problemtree.tag in html_problem_semantics:
            # Grading internals (answers, scripts, rubrics) never reach students.
            return
        problemid = problemtree.get('id')  # my ID
        if problemtree.tag in inputtypes.registry.registered_tags():
            # If this is an inputtype subtree, let it render itself.
            status = "unsubmitted"
            msg = ''
            hint = ''
            hintmode = None
            input_id = problemtree.get('id')
            answervariable = None
            if problemid in self.correct_map:
                pid = input_id
                status = self.correct_map.get_correctness(pid)
                msg = self.correct_map.get_msg(pid)
                hint = self.correct_map.get_hint(pid)
                hintmode = self.correct_map.get_hintmode(pid)
                answervariable = self.correct_map.get_property(pid, 'answervariable')
            value = ""
            if self.student_answers and problemid in self.student_answers:
                value = self.student_answers[problemid]
            if input_id not in self.input_state:
                self.input_state[input_id] = {}
            # do the rendering: assemble everything the InputType needs.
            state = {
                'value': value,
                'status': status,
                'id': input_id,
                'input_state': self.input_state[input_id],
                'answervariable': answervariable,
                'feedback': {
                    'message': msg,
                    'hint': hint,
                    'hintmode': hintmode,
                }
            }
            input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
            # save the input type so that we can make ajax calls on it if we need to
            self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state)
            return self.inputs[input_id].get_html()
        # let each Response render itself
        if problemtree in self.responders:
            overall_msg = self.correct_map.get_overall_message()
            return self.responders[problemtree].render_html(
                self._extract_html, response_msg=overall_msg
            )
        # let each custom renderer render itself:
        if problemtree.tag in customrender.registry.registered_tags():
            renderer_class = customrender.registry.get_class_for_tag(problemtree.tag)
            renderer = renderer_class(self.capa_system, problemtree)
            return renderer.get_html()
        # otherwise, render children recursively, and copy over attributes
        tree = etree.Element(problemtree.tag)
        for item in problemtree:
            item_xhtml = self._extract_html(item)
            if item_xhtml is not None:
                tree.append(item_xhtml)
        if tree.tag in html_transforms:
            # Neutralize capa-specific tags (problem/text/math) into plain HTML;
            # deliberately drops their attributes.
            tree.tag = html_transforms[problemtree.tag]['tag']
        else:
            # copy attributes over if not innocufying
            for (key, value) in problemtree.items():
                tree.set(key, value)
        tree.text = problemtree.text
        tree.tail = problemtree.tail
        return tree
def _preprocess_problem(self, tree): # private
"""
Assign IDs to all the responses
Assign sub-IDs to all entries (textline, schematic, etc.)
Annoted correctness and value
In-place transformation
Also create capa Response instances for each responsetype and save as self.responders
Obtain all responder answers and save as self.responder_answers dict (key = response)
"""
response_id = 1
self.responders = {}
for response in tree.xpath('//' + "|//".join(responsetypes.registry.registered_tags())):
response_id_str = self.problem_id + "_" + str(response_id)
# create and save ID for this response
response.set('id', response_id_str)
response_id += 1
answer_id = 1
input_tags = inputtypes.registry.registered_tags()
inputfields = tree.xpath(
"|".join(['//' + response.tag + '[@id=$id]//' + x for x in input_tags + solution_tags]),
id=response_id_str
)
# assign one answer_id for each input type or solution type
for entry in inputfields:
entry.attrib['response_id'] = str(response_id)
entry.attrib['answer_id'] = str(answer_id)
entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id)
answer_id = answer_id + 1
# instantiate capa Response
responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag)
responder = responsetype_cls(response, inputfields, self.context, self.capa_system, self.capa_module)
# save in list in self
self.responders[response] = responder
# get responder answers (do this only once, since there may be a performance cost,
# eg with externalresponse)
self.responder_answers = {}
for response in self.responders.keys():
try:
self.responder_answers[response] = self.responders[response].get_answers()
except:
log.debug('responder %s failed to properly return get_answers()',
self.responders[response]) # FIXME
raise
# <solution>...</solution> may not be associated with any specific response; give
# IDs for those separately
# TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i).
solution_id = 1
for solution in tree.findall('.//solution'):
solution.attrib['id'] = "%s_solution_%i" % (self.problem_id, solution_id)
solution_id += 1
| agpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/tablib-0.10.0/tablib/packages/yaml/tokens.py | 985 | 2573 |
class Token(object):
    """Base class for YAML scanner tokens; carries source position marks."""
    def __init__(self, start_mark, end_mark):
        # Positions delimiting the token in the input stream.
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        """Class name plus all non-mark attributes, sorted by name."""
        shown = sorted(
            name for name in self.__dict__ if not name.endswith('_mark')
        )
        body = ', '.join('%s=%r' % (name, getattr(self, name)) for name in shown)
        return '%s(%s)' % (self.__class__.__name__, body)
# Each subclass identifies one lexical construct via its class-level `id`
# string; the parser matches on these ids.  Value-carrying tokens override
# __init__ to store their payload alongside the marks.
#class BOMToken(Token):
#    id = '<byte order mark>'
class DirectiveToken(Token):
    # e.g. %YAML or %TAG directives; `name` is the directive name.
    id = '<directive>'
    def __init__(self, name, value, start_mark, end_mark):
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class DocumentStartToken(Token):
    id = '<document start>'
class DocumentEndToken(Token):
    id = '<document end>'
class StreamStartToken(Token):
    # Emitted once at stream start; remembers the detected input encoding.
    id = '<stream start>'
    def __init__(self, start_mark=None, end_mark=None,
            encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.encoding = encoding
class StreamEndToken(Token):
    id = '<stream end>'
class BlockSequenceStartToken(Token):
    id = '<block sequence start>'
class BlockMappingStartToken(Token):
    id = '<block mapping start>'
class BlockEndToken(Token):
    id = '<block end>'
class FlowSequenceStartToken(Token):
    id = '['
class FlowMappingStartToken(Token):
    id = '{'
class FlowSequenceEndToken(Token):
    id = ']'
class FlowMappingEndToken(Token):
    id = '}'
class KeyToken(Token):
    id = '?'
class ValueToken(Token):
    id = ':'
class BlockEntryToken(Token):
    id = '-'
class FlowEntryToken(Token):
    id = ','
class AliasToken(Token):
    # *alias reference; `value` is the anchor name being referenced.
    id = '<alias>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class AnchorToken(Token):
    # &anchor definition; `value` is the anchor name.
    id = '<anchor>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class TagToken(Token):
    # !tag; `value` holds the (handle, suffix) pair.
    id = '<tag>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class ScalarToken(Token):
    # `plain` is True for unquoted scalars; `style` records the quote style.
    id = '<scalar>'
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        self.value = value
        self.plain = plain
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
| apache-2.0 |
heeraj123/oh-mainline | vendor/packages/scrapy/scrapyd/tests/test_poller.py | 30 | 1416 | import os
from twisted.trial import unittest
from twisted.internet.defer import Deferred
from zope.interface.verify import verifyObject
from scrapyd.interfaces import IPoller
from scrapyd.config import Config
from scrapyd.poller import QueuePoller
from scrapyd.utils import get_spider_queues
class QueuePollerTest(unittest.TestCase):
def setUp(self):
d = self.mktemp()
eggs_dir = os.path.join(d, 'eggs')
dbs_dir = os.path.join(d, 'dbs')
os.makedirs(eggs_dir)
os.makedirs(dbs_dir)
os.makedirs(os.path.join(eggs_dir, 'mybot1'))
os.makedirs(os.path.join(eggs_dir, 'mybot2'))
config = Config(values={'eggs_dir': eggs_dir, 'dbs_dir': dbs_dir})
self.queues = get_spider_queues(config)
self.poller = QueuePoller(config)
def test_interface(self):
verifyObject(IPoller, self.poller)
def test_poll_next(self):
self.queues['mybot1'].add('spider1')
self.queues['mybot2'].add('spider2')
d1 = self.poller.next()
d2 = self.poller.next()
self.failUnless(isinstance(d1, Deferred))
self.failIf(hasattr(d1, 'result'))
self.poller.poll()
self.queues['mybot1'].pop()
self.poller.poll()
self.failUnlessEqual(d1.result, {'_project': 'mybot1', '_spider': 'spider1'})
self.failUnlessEqual(d2.result, {'_project': 'mybot2', '_spider': 'spider2'})
| agpl-3.0 |
cloud9UG/odoo | openerp/report/render/rml2pdf/utils.py | 381 | 7143 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2003, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
return (s or '').replace('&', '&').replace('<', '<').replace('>', '>')
def xml2str(s):
return (s or '').replace('&','&').replace('<','<').replace('>','>')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop'):
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except GeneratorExit:
yield n
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
continue
if self and self.localcontext and n.get('rml_except'):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
tagname = ''
except GeneratorExit:
pass
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n
def _process_text(self, txt):
    """Translate ``txt`` according to the language in the local context,
    replace dynamic ``[[expr]]`` with their real value, then escape
    the result for XML.

    :param str txt: original text to translate (must NOT be XML-escaped)
    :return: translated text, with dynamic expressions evaluated and
             with special XML characters escaped (``&,<,>``).
    """
    # Without a rendering context there is nothing to translate or
    # evaluate: just escape the raw text.
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    result = ''
    # _regex.split alternates literal text and [[...]] expression bodies.
    sps = _regex.split(txt)
    while sps:
        # This is a simple text to translate
        to_translate = tools.ustr(sps.pop(0))
        result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
        if sps:
            txt = None
            try:
                expr = sps.pop(0)
                txt = eval(expr, self.localcontext)
                if txt and isinstance(txt, basestring):
                    txt = tools.ustr(txt)
            except Exception:
                # A broken expression must not abort report rendering.
                _logger.error("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
            if isinstance(txt, basestring):
                result += txt
            elif txt and (txt is not None) and (txt is not False):
                # Non-string truthy values (numbers, dates, ...) are coerced.
                result += ustr(txt)
    return str2xml(result)
def text_get(node):
    """Concatenate the (unicode-coerced) text of every child of *node*."""
    return ''.join(ustr(child.text) for child in node)
# Size-string parsers: (regex matching "<number><unit>", multiplier that
# converts the captured number to reportlab points).  The last, unit-less
# pattern is the fallback and leaves the value unchanged.
units = [
    (re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
    (re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
    (re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
    (re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
    """Convert a size string such as ``"2cm"`` or ``"10mm"`` to points.

    Returns ``False`` when *size* is empty or matches no known unit.
    """
    global units
    if not size:
        return False
    if '.' not in size:
        # No ASCII decimal point present: the string may use the locale's
        # decimal separator, so normalise it to '.' before matching.
        try:
            radix = locale.nl_langinfo(locale.RADIXCHAR)
        except Exception:
            radix = locale.localeconv()['decimal_point']
        size = size.replace(radix, '.')
    for pattern, factor in units:
        match = pattern.search(size, 0)
        if match:
            return factor * float(match.group(1))
    return False
def tuple_int_get(node, attr_name, default=None):
    """Parse the comma-separated attribute *attr_name* of *node* into ints.

    Returns *default* when the attribute is missing or empty.
    """
    raw = node.get(attr_name)
    if not raw:
        return default
    return map(int, raw.split(','))
def bool_get(value):
    """Interpret ``"1"`` or ``"yes"`` (case-insensitive) as True."""
    if str(value) == "1":
        return True
    return value.lower() == 'yes'
def attr_get(node, attrs, dict=None):
    """Extract typed attributes from an XML *node*.

    Every name in *attrs* is parsed as a size (see :func:`unit_get`).
    *dict* (name kept for backward compatibility) maps extra attribute
    names to a type tag: 'str', 'bool', 'int', 'unit' or 'float'.
    Missing / empty attributes are simply omitted from the result.
    """
    if dict is None:
        dict = {}
    res = {}
    for name in attrs:
        value = node.get(name)
        if value:
            res[name] = unit_get(value)
    converters = {
        'str': tools.ustr,
        'bool': bool_get,
        'int': int,
        'unit': unit_get,
        'float': float,
    }
    for key in dict:
        raw = node.get(key)
        if raw:
            convert = converters.get(dict[key])
            if convert is not None:
                res[key] = convert(raw)
    return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
LinuxChristian/home-assistant | homeassistant/components/sensor/netdata.py | 2 | 5062 | """
Support gathering system information of hosts which are running netdata.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.netdata/
"""
import logging
from datetime import timedelta
from urllib.parse import urlsplit
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_RESOURCES)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# REST API base path and the query fragment asking Netdata for the most
# recent sample of a chart (before=0&after=-1 selects the latest point).
_RESOURCE = 'api/v1'
_REALTIME = 'before=0&after=-1&options=seconds'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Netdata'
DEFAULT_PORT = '19999'
SCAN_INTERVAL = timedelta(minutes=1)
# Sensor key -> [friendly name, unit of measurement, netdata chart id,
# data element within that chart, display precision (decimal places)].
SENSOR_TYPES = {
    'memory_free': ['RAM Free', 'MiB', 'system.ram', 'free', 1],
    'memory_used': ['RAM Used', 'MiB', 'system.ram', 'used', 1],
    'memory_cached': ['RAM Cached', 'MiB', 'system.ram', 'cached', 1],
    'memory_buffers': ['RAM Buffers', 'MiB', 'system.ram', 'buffers', 1],
    'swap_free': ['Swap Free', 'MiB', 'system.swap', 'free', 1],
    'swap_used': ['Swap Used', 'MiB', 'system.swap', 'used', 1],
    'processes_running': ['Processes Running', 'Count', 'system.processes',
                          'running', 0],
    'processes_blocked': ['Processes Blocked', 'Count', 'system.processes',
                          'blocked', 0],
    'system_load': ['System Load', '15 min', 'system.load', 'load15', 2],
    'system_io_in': ['System IO In', 'Count', 'system.io', 'in', 0],
    'system_io_out': ['System IO Out', 'Count', 'system.io', 'out', 0],
    'ipv4_in': ['IPv4 In', 'kb/s', 'system.ipv4', 'received', 0],
    'ipv4_out': ['IPv4 Out', 'kb/s', 'system.ipv4', 'sent', 0],
    'disk_free': ['Disk Free', 'GiB', 'disk_space._', 'avail', 2],
}
# Configuration schema: host/port of the Netdata instance plus the list of
# requested resources, each of which must be a key of SENSOR_TYPES.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_RESOURCES, default=['memory_free']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Netdata sensor platform from its validated config."""
    name = config.get(CONF_NAME)
    base_url = 'http://{}:{}'.format(
        config.get(CONF_HOST), config.get(CONF_PORT))
    data_url = '{}/{}/data?chart='.format(base_url, _RESOURCE)
    requested = config.get(CONF_RESOURCES)
    # Group the requested sensor keys by the Netdata chart they read from,
    # so each chart is polled once and shared between its sensors.
    charts = {}
    for sensor_key, meta in sorted(SENSOR_TYPES.items()):
        if sensor_key in requested:
            charts.setdefault(meta[2], []).append(sensor_key)
    entities = []
    for chart, sensor_keys in charts.items():
        rest = NetdataData('{}{}&{}'.format(data_url, chart, _REALTIME))
        rest.update()
        entities.extend(
            NetdataSensor(rest, name, key) for key in sensor_keys)
    add_devices(entities, True)
class NetdataSensor(Entity):
    """Sensor exposing a single Netdata metric via a shared REST poller."""

    def __init__(self, rest, name, sensor_type):
        """Initialize the sensor from its SENSOR_TYPES metadata."""
        self.rest = rest
        self.type = sensor_type
        meta = SENSOR_TYPES[self.type]
        self._name = '{} {}'.format(name, meta[0])
        self._precision = meta[4]
        self._unit_of_measurement = meta[1]

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the metric value formatted to the configured precision."""
        data = self.rest.data
        if data is None:
            return None
        netdata_id = SENSOR_TYPES[self.type][3]
        if netdata_id not in data:
            return None
        return "{0:.{1}f}".format(data[netdata_id], self._precision)

    @property
    def available(self):
        """Return True if the last REST poll succeeded."""
        return self.rest.available

    def update(self):
        """Refresh the shared Netdata REST data."""
        self.rest.update()
class NetdataData(object):
    """Poll one Netdata chart endpoint and cache the latest values."""

    def __init__(self, resource):
        """Initialize the data object.

        :param resource: full REST URL of the chart to poll.
        """
        self._resource = resource
        self.data = None
        self.available = True

    def update(self):
        """Get the latest data from the Netdata REST API.

        The original code only caught ConnectionError, so a timeout, an
        HTTP error or an unparsable payload would raise out of the update
        cycle.  Catch the whole requests failure hierarchy plus payload
        decoding errors and mark the data unavailable instead.
        """
        try:
            response = requests.get(self._resource, timeout=5)
            det = response.json()
            # Netdata returns parallel 'labels' and 'data' arrays; the
            # first data row is the most recent sample.
            self.data = {k: v for k, v in zip(det['labels'], det['data'][0])}
            self.available = True
        except (requests.exceptions.RequestException, ValueError,
                KeyError, IndexError):
            _LOGGER.error(
                "Unable to fetch data from %s", urlsplit(self._resource)[1])
            self.data = None
            self.available = False
| apache-2.0 |
lctseng/NCTU-SDN-Project | openvswitch-2.3.0/tests/test-reconnect.py | 4 | 5682 | # Copyright (c) 2009, 2010, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import sys
import ovs.reconnect
# Simulated clock in milliseconds and the Reconnect instance under test;
# both are (re)initialised by main().
now = 0
r = None
def do_enable(_):
    # "enable" command: allow the reconnect FSM to start connecting.
    r.enable(now)
def do_disable(_):
    # "disable" command: stop all connection attempts.
    r.disable(now)
def do_force_reconnect(_):
    # "force-reconnect" command: drop and redial immediately.
    r.force_reconnect(now)
def error_from_string(s):
    """Map a symbolic error name from the test script to a numeric code.

    Empty/None means "no error" (0); unknown names abort the test run.
    """
    if not s:
        return 0
    if s == "ECONNREFUSED":
        return errno.ECONNREFUSED
    if s == "EOF":
        return ovs.reconnect.EOF
    sys.stderr.write("unknown error '%s'\n" % s)
    sys.exit(1)
def do_disconnected(arg):
    # Report a disconnect, optionally with a symbolic error name.
    r.disconnected(now, error_from_string(arg))
def do_connecting(_):
    # Report that a connection attempt has started.
    r.connecting(now)
def do_connect_failed(arg):
    # Report a failed connection attempt with an optional error name.
    r.connect_failed(now, error_from_string(arg))
def do_connected(_):
    # Report a successful connection.
    r.connected(now)
def do_activity(_):
    # Report activity on the connection (resets the probe timer).
    r.activity(now)
def do_run(arg):
    # Advance the clock by *arg* ms (if given), then run the FSM and print
    # the action it requests.
    global now
    if arg is not None:
        now += int(arg)
    action = r.run(now)
    if action is None:
        pass
    elif action == ovs.reconnect.CONNECT:
        print " should connect"
    elif action == ovs.reconnect.DISCONNECT:
        print " should disconnect"
    elif action == ovs.reconnect.PROBE:
        print " should send probe"
    else:
        assert False
def do_advance(arg):
    # Move the simulated clock forward by *arg* ms without running the FSM.
    global now
    now += int(arg)
def do_timeout(_):
    # Query the FSM's next deadline and jump the clock straight to it.
    global now
    timeout = r.timeout(now)
    if timeout >= 0:
        print " advance %d ms" % timeout
        now += timeout
    else:
        print " no timeout"
def do_set_max_tries(arg):
    # Limit the number of connection attempts.
    r.set_max_tries(int(arg))
def diff_stats(old, new, delta):
    """Print only the statistics fields that changed between two snapshots.

    *delta* is the number of ms the clock advanced between the snapshots;
    it is used to suppress fields that merely grew by the elapsed time.
    """
    if (old.state != new.state or
        old.state_elapsed != new.state_elapsed or
        old.backoff != new.backoff):
        print(" in %s for %d ms (%d ms backoff)"
              % (new.state, new.state_elapsed, new.backoff))
    if (old.creation_time != new.creation_time or
        old.last_activity != new.last_activity or
        old.last_connected != new.last_connected):
        print(" created %d, last activity %d, last connected %d"
              % (new.creation_time, new.last_activity, new.last_connected))
    if (old.n_successful_connections != new.n_successful_connections or
        old.n_attempted_connections != new.n_attempted_connections or
        old.seqno != new.seqno):
        print(" %d successful connections out of %d attempts, seqno %d"
              % (new.n_successful_connections, new.n_attempted_connections,
                 new.seqno))
    if (old.is_connected != new.is_connected):
        if new.is_connected:
            negate = ""
        else:
            negate = "dis"
        print(" %sconnected" % negate)
    # Connected-duration counters are expected to grow by *delta* while
    # connected; only print when they changed in some other way.
    if (old.last_connected != new.last_connected or
        (new.msec_since_connect != None and
         old.msec_since_connect != new.msec_since_connect - delta) or
        (old.total_connected_duration != new.total_connected_duration - delta
         and not (old.total_connected_duration == 0 and
                  new.total_connected_duration == 0))):
        print(" last connected %d ms ago, connected %d ms total"
              % (new.msec_since_connect, new.total_connected_duration))
    if (old.last_disconnected != new.last_disconnected or
        (new.msec_since_disconnect != None and
         old.msec_since_disconnect != new.msec_since_disconnect - delta)):
        print(" disconnected at %d ms (%d ms ago)"
              % (new.last_disconnected, new.msec_since_disconnect))
def do_set_passive(_):
    # Switch the FSM to passive (listening) mode.
    r.set_passive(True, now)
def do_listening(_):
    # Report that the passive socket is now listening.
    r.listening(now)
def do_listen_error(arg):
    # Report a listen failure with the given numeric errno.
    r.listen_error(now, int(arg))
def main():
    """Drive the Reconnect FSM from a command script read on stdin.

    Each input line is ``<command> [arg]``; after every command the stats
    snapshot is diffed against the previous one and changes are printed.
    Lines starting with '#' and blank lines are echoed but ignored.
    """
    commands = {
        "enable": do_enable,
        "disable": do_disable,
        "force-reconnect": do_force_reconnect,
        "disconnected": do_disconnected,
        "connecting": do_connecting,
        "connect-failed": do_connect_failed,
        "connected": do_connected,
        "activity": do_activity,
        "run": do_run,
        "advance": do_advance,
        "timeout": do_timeout,
        "set-max-tries": do_set_max_tries,
        "passive": do_set_passive,
        "listening": do_listening,
        "listen-error": do_listen_error
    }
    global now
    global r
    now = 1000
    r = ovs.reconnect.Reconnect(now)
    r.set_name("remote")
    prev = r.get_stats(now)
    print "### t=%d ###" % now
    old_time = now
    old_max_tries = r.get_max_tries()
    while True:
        line = sys.stdin.readline()
        if line == "":
            break
        print line[:-1]
        if line[0] == "#":
            continue
        args = line.split()
        if len(args) == 0:
            continue
        command = args[0]
        if len(args) > 1:
            op = args[1]
        else:
            op = None
        commands[command](op)
        # Print a new timestamp header whenever the clock moved.
        if old_time != now:
            print
            print "### t=%d ###" % now
        cur = r.get_stats(now)
        diff_stats(prev, cur, now - old_time)
        prev = cur
        if r.get_max_tries() != old_max_tries:
            old_max_tries = r.get_max_tries()
            print " %d tries left" % old_max_tries
        old_time = now
if __name__ == '__main__':
    main()
| apache-2.0 |
reddraggone9/youtube-dl | youtube_dl/extractor/chaturbate.py | 13 | 1635 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class ChaturbateIE(InfoExtractor):
    """Extractor for chaturbate.com live streams (HLS/m3u8)."""
    _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.chaturbate.com/siswet19/',
        'info_dict': {
            'id': 'siswet19',
            'ext': 'mp4',
            'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'age_limit': 18,
            'is_live': True,
        },
        'params': {
            # Live stream: only verify metadata, do not download.
            'skip_download': True,
        }
    }, {
        'url': 'https://en.chaturbate.com/siswet19/',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player embeds the HLS playlist in a src="...m3u8..." attribute.
        m3u8_url = self._search_regex(
            r'src=(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage,
            'playlist', default=None, group='url')
        if not m3u8_url:
            # No playlist: the room is offline/unavailable; surface the
            # page's own description as the error message.
            error = self._search_regex(
                r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
                webpage, 'error', group='error')
            raise ExtractorError(error, expected=True)
        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
        return {
            'id': video_id,
            # Live streams get a timestamped title.
            'title': self._live_title(video_id),
            'thumbnail': 'https://cdn-s.highwebmedia.com/uHK3McUtGCG3SMFcd4ZJsRv8/roomimage/%s.jpg' % video_id,
            'age_limit': self._rta_search(webpage),
            'is_live': True,
            'formats': formats,
        }
fracting/depot_tools | third_party/gsutil/gslib/commands/getlogging.py | 51 | 5500 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.command import Command
from gslib.command import COMMAND_NAME
from gslib.command import COMMAND_NAME_ALIASES
from gslib.command import CONFIG_REQUIRED
from gslib.command import FILE_URIS_OK
from gslib.command import MAX_ARGS
from gslib.command import MIN_ARGS
from gslib.command import PROVIDER_URIS_OK
from gslib.command import SUPPORTED_SUB_ARGS
from gslib.command import URIS_START_ARG
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
_detailed_help_text = ("""
<B>SYNOPSIS</B>
gsutil getlogging uri
<B>DESCRIPTION</B>
If logging is enabled for the specified bucket uri, the server responds
with a <Logging> XML element that looks something like this:
<?xml version="1.0" ?>
<Logging>
<LogBucket>
logs-bucket
</LogBucket>
<LogObjectPrefix>
my-logs-enabled-bucket
</LogObjectPrefix>
</Logging>
If logging is not enabled, an empty <Logging> element is returned.
You can download log data from your log bucket using the gsutil cp command.
<B>ACCESS LOG FIELDS</B>
Field Type Description
time_micros integer The time that the request was completed, in
microseconds since the Unix epoch.
c_ip string The IP address from which the request was made.
The "c" prefix indicates that this is information
about the client.
c_ip_type integer The type of IP in the c_ip field:
A value of 1 indicates an IPV4 address.
A value of 2 indicates an IPV6 address.
c_ip_region string Reserved for future use.
cs_method string The HTTP method of this request. The "cs" prefix
indicates that this information was sent from the
client to the server.
cs_uri string The URI of the request.
sc_status integer The HTTP status code the server sent in response.
The "sc" prefix indicates that this information
was sent from the server to the client.
cs_bytes integer The number of bytes sent in the request.
sc_bytes integer The number of bytes sent in the response.
time_taken_micros integer The time it took to serve the request in
microseconds.
cs_host string The host in the original request.
cs_referrer string The HTTP referrer for the request.
cs_user_agent string The User-Agent of the request.
s_request_id string The request identifier.
cs_operation string The Google Cloud Storage operation e.g.
GET_Object.
cs_bucket string The bucket specified in the request. If this is a
list buckets request, this can be null.
cs_object string The object specified in this request. This can be
null.
<B>STORAGE DATA FIELDS</B>
Field Type Description
bucket string The name of the bucket.
storage_byte_hours integer Average size in bytes/per hour of that bucket.
""")
class GetLoggingCommand(Command):
    """Implementation of gsutil getlogging command.

    Prints the <Logging> subresource of the single bucket URI given on
    the command line (see _detailed_help_text for the response format).
    """
    # Command specification (processed by parent class).
    command_spec = {
        # Name of command.
        COMMAND_NAME : 'getlogging',
        # List of command name aliases.
        COMMAND_NAME_ALIASES : [],
        # Min number of args required by this command.
        MIN_ARGS : 1,
        # Max number of args required by this command, or NO_MAX.
        MAX_ARGS : 1,
        # Getopt-style string specifying acceptable sub args.
        SUPPORTED_SUB_ARGS : '',
        # True if file URIs acceptable for this command.
        FILE_URIS_OK : False,
        # True if provider-only URIs acceptable for this command.
        PROVIDER_URIS_OK : False,
        # Index in args of first URI arg.
        URIS_START_ARG : 0,
        # True if must configure gsutil before running command.
        CONFIG_REQUIRED : True,
    }
    help_spec = {
        # Name of command or auxiliary help info for which this help applies.
        HELP_NAME : 'getlogging',
        # List of help name aliases.
        HELP_NAME_ALIASES : [],
        # Type of help:
        HELP_TYPE : HelpType.COMMAND_HELP,
        # One line summary of this help.
        HELP_ONE_LINE_SUMMARY : 'Get logging configuration for a bucket',
        # The full help text.
        HELP_TEXT : _detailed_help_text,
    }
    # Command entry point.
    def RunCommand(self):
        # Fetch and print the bucket's 'logging' subresource; 0 = success.
        self.GetXmlSubresource('logging', self.args[0])
        return 0
| bsd-3-clause |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/GL/ARB/ES2_compatibility.py | 2 | 1257 | '''OpenGL extension ARB.ES2_compatibility
This module customises the behaviour of the
OpenGL.raw.GL.ARB.ES2_compatibility to provide a more
Python-friendly API
Overview (from the spec)
This extension adds support for features of OpenGL ES 2.0 that are
missing from OpenGL 3.x. Enabling these features will ease the process
of porting applications from OpenGL ES 2.0 to OpenGL.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/ES2_compatibility.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.ES2_compatibility import *
### END AUTOGENERATED SECTION
from OpenGL import lazywrapper
from OpenGL.arrays import GLintArray
@lazywrapper.lazy( glGetShaderPrecisionFormat )
def glGetShaderPrecisionFormat(baseOperation, shadertype, precisiontype, range=None,precision=None ):
    """Provides range and precision if not provided, returns (range,precision).

    *range* and *precision* are 2-element GLint output arrays; they are
    allocated here when the caller does not supply them.  NOTE: the
    parameter name ``range`` shadows the builtin on purpose — it mirrors
    the OpenGL C API's parameter name.
    """
    if range is None:
        range = GLintArray.zeros( (2,))
    if precision is None:
        precision = GLintArray.zeros((2,))
    baseOperation( shadertype, precisiontype, range, precision )
    return range, precision
| mit |
sureshthalamati/spark | examples/src/main/python/ml/decision_tree_classification_example.py | 123 | 3003 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DecisionTreeClassificationExample")\
.getOrCreate()
# $example on$
# Load the data stored in LIBSVM format as a DataFrame.
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
treeModel = model.stages[2]
# summary only
print(treeModel)
# $example off$
spark.stop()
| apache-2.0 |
MG-group-tools/MGFunc | mgfunc_v2/cluster2fasta.py | 1 | 15574 | #!/usr/bin/env python2.7
import sys
import os
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 13:13:45 2013
CLASS-VERSION
@author: Kosai
"""
import cPickle as pickle
from datetime import datetime as dt
import time
import argparse
import gzip
class main:
    '''
    Class version of the cluster2fasta program: extracts FASTA sequences
    for the gene-catalog and/or UniProt members of each cluster, using
    index files that record byte offsets into the FASTA files.
    '''
    def __init__(self):
        # Wall-clock start time and a human-readable timestamp for logging.
        self.start = time.time()
        self.d_ = dt.today()
        self.timestarted = self.d_.strftime("%d-%m-%Y %H:%M:%S")
        # Gene id -> [start offset, end offset, fasta-file index] mapping.
        self._D = {}
        self.parseArgs()
    def parseArgs(self):
        """Build the argparse parser; parsing itself happens in __main__."""
        parser = argparse.ArgumentParser(prog="cluster2fasta.py", usage="cluster2fasta.py -c mycluster.txt -o mycluster.output -num [-ui uniprot.index\/uniprot.index.p -uf uniprot.fasta] [-ki SAMPLE.index\/SAMPLE.index.p -kf SAMPLE.fasta]", epilog="Written by Kosai+Asli, oct 2013. Last modified apr 2014.")
        parser.add_argument("-ui",metavar="uniprot_index_file",help="Uniprot index file",nargs="*")
        parser.add_argument("-uf",metavar="uniprot_fasta",help="Fasta-file for all uniprot (from swiss2fasta)",nargs="*")
        parser.add_argument("-ki",metavar="sample_index_file",help="Genecatalog index file",nargs=1)
        parser.add_argument("-kf",metavar="sample_fasta",help="Fasta-file for all genecatalog sequences",nargs=1)
        parser.add_argument("-sfi",metavar="sample_list",help="A list of genecatalog index files and fasta files",nargs=1)
        #parser.add_argument("-sfl",metavar="sample_fasta_list",help="Fasta-files list for all genecatalog sequences",nargs=1)
        parser.add_argument("-c",metavar="Cluster-name",help="Cluster-file",nargs=1,required=True)
        parser.add_argument("-o",metavar="Output",help="Output name",nargs=1)
        parser.add_argument("-num",help="Adds 2 coloumns to a new file, with cluster_id\'s, number of sample-genes and number of uniprot ID\'s",action="store_true")
        parser.add_argument("-v",help="Verbose. Prints out progress and details to stdout output. Write \"-v\" with no arguments in commandline. Default is off.",action="store_true")
        #return parser.parse_args("-o testcluster.argsO_tester".split()), parser #testing on windows
        #return parser.parse_args("".split()), parser #testing on window
#        self.args = parser.parse_args()
        self.parser = parser
    def fileIn(self,infile):
        """Open *infile* for reading, transparently handling .gz files."""
        if infile[-3:] == ".gz":
            return gzip.open(infile,"r")
        else:
            return open(infile,"r")
    def fileOut(self,outfile):
        """Open *outfile* for writing (truncates an existing file)."""
        return open(outfile, "w")
    def fileClose(self,cfile):
        """Close a file handle opened with fileIn/fileOut."""
        cfile.close()
    # Dead code kept for reference: the pre-class, module-level version of
    # dictMaker (note it still supports pickled indices).
    '''
    def dictMaker(i,D_ID): #Create dictionary from index-text file
        D = {}
        if i[0].split(".")[-1] == "index":
            indexline = ""
            for el in D_ID:
                indexline = el.rstrip().split("\t")
                D[indexline[0]] = [indexline[1],indexline[2]]
            self.printer("\n\nDICTIONARY DONE!!!!\n\n")
            return D
        else:
            return pickle.load(D_ID)
    '''
    def dictMaker(self,i,D_ID, j): #Create dictionary from index-text file
        """Load index file *i* (open handle *D_ID*) into self._D.

        Each index line is "<gene_id>\t<start>\t<stop>"; *j* records which
        fasta file (by position in self._F) the offsets refer to.
        Returns 1 on success so callers can report failure otherwise.
        """
        if i.split(".")[-1] == "indexed":
            indexline = ""
            for el in D_ID:
                indexline = el.rstrip().split("\t")
                self._D[indexline[0]] = [indexline[1],indexline[2], j]
            self.printer("\nDictionary done, time used (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
            return 1
        # Pickled-index support was disabled; only ".indexed" files load.
#        else:
#            print "Check index file names. :" + i
#            self._D = pickle.load(D_ID)
#            self.printer("\nDictionary done, time used (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
#            return 1
    def missingGeneLog(self,genecat,uniprot):
        """Write the [gene_id, cluster_id] pairs that had no index entry
        to <output>.missingGenes.log (gene-catalog first, then UniProt)."""
        log = self.fileOut(self.args.o[0] + ".missingGenes.log")
        for el in genecat:
            log.write(el[0]+"\t"+el[1]+"\n")
        for el in uniprot:
            log.write(el[0]+"\t"+el[1]+"\n")
        self.fileClose(log)
    def seqExtracter3(self,ID,myD,uni): #Dictionary look-up, one big dictionary
        """Fetch the sequence for *ID* using byte offsets from *myD*.

        With uni==1 the shared self.uniprotFasta handle is used; otherwise
        the fasta file recorded in the index entry (self._F) is opened per
        call.  Returns (sequence, 1) on success or ("", 0) when *ID* is
        not in the index.  Newlines are stripped from the raw slice.
        """
        if ID in myD:
            start = int(myD[ID][0])
            stop = int(myD[ID][1])
            if uni == 1:
                self.uniprotFasta.seek(start)
                seq = self.uniprotFasta.read(stop-start)
                seq = "".join(seq.split("\n"))
                return seq,1
            else:
                fasta = self.fileIn(self._F[int(myD[ID][2])][1])
                fasta.seek(start)
                seq = fasta.read(stop-start)
                seq = "".join(seq.split("\n"))
                self.fileClose(fasta)
                return seq,1
        else:
            return "",0
    def seqExtracter(self,ID,myD,fasta,uni): #Dictionary look-up, one big dictionary
        """Like seqExtracter3, but reads from an already-open *fasta*
        handle.  The *uni* flag is accepted for symmetry but unused here.
        Returns (sequence, 1) on success or ("", 0) for an unknown *ID*."""
        if ID in myD:
            start = int(myD[ID][0])
            stop = int(myD[ID][1])
            fasta.seek(start)
            seq = fasta.read(stop-start)
            seq = "".join(seq.split("\n"))
            return seq,1
        else:
            return "",0
    def seqExtracter2(self,ID,myD,fasta): #Dictionary look-up, each key is first gene letter
        """Variant for a two-level index: *myD* is keyed by the first
        letter of the gene id, then by the full id.  No missing-id
        handling — a KeyError propagates for unknown ids."""
        start = int(myD[ID[0]][ID][0])
        stop = int(myD[ID[0]][ID][1])
        fasta.seek(start)
        seq = fasta.read(stop-start)
        seq = "".join(seq.split("\n"))
        return seq
    def genecat_list(self):
        """Extract gene-catalog sequences using the -sfi list of
        "<index>\t<fasta>" pairs; writes <output>.genecatalog.fasta and
        returns the [gene_id, cluster_id] pairs that were not found."""
        clusterID =self.fileIn(self.args.c[0])
        output = self.fileOut(self.args.o[0]+".genecatalog.fasta")
        self._F = {}
        infiles=0
        # Load every sample's index into the shared dictionary self._D;
        # NOTE: py2-only `file()` builtin is used to read the list file.
        for line in file(self.args.sfi[0]):
            index = line.split("\t")[0]
            fasta = line.split("\t")[1].strip("\n")
            self._F[infiles] = [index,fasta]
            genecatID = self.fileIn(index)
            a = self.dictMaker(index,genecatID,infiles) #takes time
            if a ==1 : self.printer("DictMaker worked for " + index)
            else: self.printer("DictMaker did not work, check index files " + index)
            self.fileClose(genecatID)
            infiles+=1
        suc = 0
        missing = []
        seq = ""
        # Cluster file columns: 0 = cluster id, 2 = comma-separated genes.
        for line in clusterID:
            L = line.rstrip().split("\t")
            C = str(L[0]) #clusterID
            L2 = L[2].split(",")
            for el in L2:
                seq,suc = self.seqExtracter3(el,self._D,0)
                if suc == 1:
                    output.write(">"+C+":"+el+"\n"+seq+"\n")
                else:
                    missing.append([el,C])
        #print self._D
        self._D = {}
        self.fileClose(output)
        self.fileClose(clusterID)
        return missing
    def genecat(self,args,parser):
        """Extract gene-catalog sequences using a single -ki/-kf pair;
        writes <output>.genecatalog.fasta and returns the missing
        [gene_id, cluster_id] pairs."""
        clusterID =self.fileIn(args.c[0])
        genecatID = self.fileIn(args.ki[0])
        genecatFasta = self.fileIn(args.kf[0])
        output = self.fileOut(args.o[0]+".genecatalog.fasta")
        a = self.dictMaker(args.ki[0],genecatID,0) #takes time
        if a ==1 : self.printer("DictMaker worked for " + args.ki[0])
        else: self.printer("DictMaker did not work, check index files " + args.ki[0])
        self.fileClose(genecatID)
        # GenecatalogD assigns short sequential aliases to genes; only the
        # original ids are written out (the alias write is commented out).
        GenecatalogD = {}
        cGenecatalog = 1
        suc = 0
        missing = []
        seq = ""
        for line in clusterID:
            L = line.rstrip().split("\t")
            C = str(L[0]) #clusterID
            L2 = L[2].split(",")
            for el in L2:
                seq,suc = self.seqExtracter(el,self._D,genecatFasta,0)
                if suc == 1:
                    if el not in GenecatalogD:
                        GenecatalogD[el] = el[0]+str(cGenecatalog)
                        cGenecatalog += 1
                    #output.write(">"+C+"_"+GenecatalogD[el]+"\n"+seq+"\n")
                    output.write(">"+C+":"+el+"\n"+seq+"\n")
                else:
                    missing.append([el,C])
        #print self._D
        self._D = {}
#        GenecatalogIDconversion(GenecatalogD)
        self.fileClose(output)
        self.fileClose(genecatFasta)
        self.fileClose(clusterID)
        return missing
    def uniprot(self,args,parser):
        """Extract UniProt sequences for each cluster using -ui/-uf;
        writes <output>.uniprotids.fasta and returns the missing
        [uniprot_id, cluster_id] pairs."""
        clusterID = self.fileIn(args.c[0])
        uniprotID = self.fileIn(args.ui[0])
        self.uniprotFasta = self.fileIn(args.uf[0])
        # Count cluster lines (only used by the disabled progress output).
        ctotfile = os.popen("wc -l "+args.c[0])
        ctot = ctotfile.read()
        ctotfile.close()
        ctot = int(ctot.split(" ")[0])
        rangelist = range(0,ctot,1)
        output = self.fileOut(args.o[0]+".uniprotids.fasta")
        D = self.dictMaker(args.ui[0],uniprotID,0) #takes time
        if D ==1 : self.printer("DictMaker worked for " + args.ui[0])
        else: self.printer("DictMaker did not work, check index files " + args.ui[0])
        self.fileClose(uniprotID)
        seq = ""
        missing = []
        suc = 1
        c = 0
        # Cluster file columns: 0 = cluster id, 1 = "N" when no uniprot
        # hits, 3 = comma-separated "db|acc|id" uniprot references.
        for line in clusterID:
            c+=1
            L = line.rstrip().split("\t")
            C = str(L[0]) #clusterID
            if L[1] == "N":
                continue
            L2 = L[3].split(",")
            for el in L2:
                # Keep only the id part of "db|acc|id".
                el = el.split("|")[2]
                seq,suc = self.seqExtracter3(el,self._D,1)
                if suc == 1:
                    output.write(">"+C+":"+el+"\n"+seq+"\n")
                else:
                    missing.append([el,C])
            #if c in rangelist:
                #self.printer("FINISHED "+str(c)+" ENTRIES out of "+str(ctot))
        del D
        self.fileClose(output)
        self.fileClose(self.uniprotFasta)
        self.fileClose(clusterID)
        return missing
    def GenecatalogIDconversion(self,D):
        """Dump the gene-id -> short-alias mapping *D* to
        GeneConversionTable.txt (currently unused; see genecat)."""
        self.printer("\nPrinting GeneConversionTable....")
        fout = self.fileOut("GeneConversionTable.txt")
        for key in D:
            fout.write(key+"\t"+D[key]+"\n")
        fout.close()
        self.printer("DONE!\n")
    def numberCounter(self,args,parser):
        """Write "<cluster_id>\t<#genes>\t<#unique uniprot ids>" for each
        cluster line to <output|cluster>.genenumbers (-num option)."""
        clusterID = self.fileIn(args.c[0])
        if self.args.o:
            output = self.fileOut(args.o[0]+".genenumbers")
        else:
            output = self.fileOut(args.c[0]+".genenumbers")
        t = "\t"
        n = "\n"
        for line in clusterID:
            L = line.split("\t")
            output.write(L[0]+t+str(len(L[1].split(",")))+t+str(len(set(L[2].split(","))))+n)
        self.fileClose(clusterID)
        self.fileClose(output)
    def printer(self,string): #surpressing output print if -q (quiet) is on
        """Print progress text to stdout, but only when -v is set.
        Uses the py2 trailing-comma print to suppress the newline."""
#        if not self.args.quiet:
        if self.args.v:
            print string,
    def read_columns(self, i, csv_file):
        """Return column *i* of a tab-separated file as a newline-prefixed
        string (used for pretty-printing the -sfi file list)."""
        item=""
        with open(csv_file, 'r') as csvfile:
            for line in csvfile.readlines():
                array = line.strip("\n").split('\t')
                item = item + "\n" + array[i]
        return item
    def mainthing(self):
        """Validate the argument combinations and dispatch to the
        requested extraction mode(s); logs missing genes at the end."""
#        self.printer("\n***cluster2fasta.py initialized at "\
#                     + self.d_.strftime("%H:%M %d/%m-%Y") + "***\n")
#        self.printer("Arguments:\n")
#        self.parseArgs()
        # no == 1 means no mode ran; missing1/2 collect unresolved ids.
        no = 1
        missing1 = []
        missing2 = []
        if bool(self.args.ki)^bool(self.args.kf):
            self.printer("***ERROR!*** Only one of -ki and -kf was provided!\n")
        elif bool(self.args.ui)^bool(self.args.uf):
            self.printer("***ERROR!*** Only one of -ui and -uf was provided!\n")
        elif not self.args.c:
            self.printer("***ERROR!*** No cluster-files(s) provided!\n")
        elif (self.args.ki or self.args.ui) and not self.args.o:
            self.printer("***ERROR!*** No output-name provided!\n")
        else:
            if self.args.ki and self.args.kf and self.args.c and self.args.o:
                self.printer("\tCluster-file: "+self.args.c[0] +"\n\tGenecatalog-index file: "+self.args.ki[0]+"\n\tGenecatalog fasta-file: "+self.args.kf[0]+"\n\tOutput file-name: "+self.args.o[0]+".genecatgenes.fasta\n")
                no = 0
                missing1 = self.genecat(self.args,self.parser)
                self.printer("\nGenecatalog Genes Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
            if self.args.sfi and self.args.c and self.args.o:
                self.printer("\tCluster-file: \n\t\t"+self.args.c[0] +"\n\tGenecatalog-index files: \n\t\t"+self.read_columns(0, self.args.sfi[0])+"\n\tGenecatalog fasta-files: \n\t\t"+self.read_columns(1, self.args.sfi[0])+"\n\tOutput file-name: \n\t\t"+ self.args.o[0]+".genecatgenes.fasta.gz\n")
                no = 0
                missing1 = self.genecat_list()
                self.printer("\nGenecatalog Genes Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
            if self.args.ui and self.args.uf and self.args.c and self.args.o:
                self.printer("\tCluster-file: "+self.args.c[0] +"\n\tUniprot-index file: "+self.args.ui[0]+"\n\tUniprot fasta-file: "+self.args.uf[0]+"\n\tOutput file-name: "+self.args.o[0]+".uniprotids.fasta\n")
                no = 0
                missing2 = self.uniprot(self.args,self.parser)
                self.printer("\nUniprot ID\'s Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
            if self.args.num and self.args.c:
                if not self.args.o:
                    self.printer("\tCluster-file: "+self.args.c[0] +"\n\tOutput file-name: "+self.args.c[0][:-4]+".genenumbers\n")
                else:
                    self.printer("\tCluster-file: "+self.args.c[0] +"\n\tOutput file-name: "+self.args.o[0]+".genenumbers\n")
                no = 0
                self.numberCounter(self.args,self.parser)
                self.printer("\nNumber Calculations Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
            if no == 1:
                self.printer("none!\n")
            self.missingGeneLog(missing1,missing2)
        timeused = (time.time() - self.start) / 60
        self.printer("Time used: "+str(round(timeused*60))\
                     + " seconds ("+str(round(timeused)) + " min)\n")
    def test(self,num):
        """Smoke-test helper: emits a fixed marker via the shared printer.

        `num` is accepted but unused -- presumably kept for call-site
        symmetry; TODO confirm before removing.
        """
        self.printer("test")
'''
if __name__ == "__main__":
myclass = main
myclass.mainthing
myclass.test(2)
self.printer("yoyoooyooyoo")
'''
if __name__ == "__main__":
    # Script entry point: build the tool object, parse the CLI options and
    # run the main workflow.
    try:
        myclass = main()
        myclass.args = myclass.parser.parse_args(sys.argv[1:])
        myclass.printer("\n### "+sys.argv[0]+" initialized at "+ myclass.timestarted + "\n")
        myclass.printer("### OPTIONS: "+str(myclass.args)+"\n")
        myclass.mainthing()
    except Exception as e:
        # `except Exception, e` was Python-2-only syntax; `as` works on
        # Python 2.6+ and 3.x.  Behaviour is unchanged: report the error and
        # the full traceback (exit status stays 0, as before).
        print(str(e))
        import traceback
        traceback.print_exc()
##############################
'''
INPUT:
The User inputs an index-file and a fasta-file.
The index file indexes each entry in the fasta file. In the case of -ui and -uf,
-ui would be a pickle-file which contains the start and end for the sequences in each
entry of the uniprot file (-uf).
if -num is toggled, the script will not create a fasta-output, but instead
show the number of genecat-genes (sample-genes) and uniprot ID's in each cluster.
OUTPUT:
The output is a fasta file containing the sequences of each uniprot/genecat-gene in the input
from the clusters.
OPTIONS LIST:
"-ui" "uniprot_index_file": Uniprot index file containing
"-uf" "uniprot_fasta": Fasta-file for all uniprot (from swiss2fasta)
"-ki" "sample_index_file": Sample index file
"-kf" "sample_fasta": Fasta-file for all sample sequences
"-c" "Cluster-name": Cluster-file
"-o" "Output fasta file": Output name
"-num": Adds 2 coloumns to a new file, with cluster_id's, number of sample-genes and number of uniprot ID's
'''
| gpl-3.0 |
KontorConsulting/odoo | addons/l10n_be/wizard/l10n_be_partner_vat_listing.py | 302 | 14738 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Corrections & modifications by Noviat nv/sa, (http://www.noviat.be):
# - VAT listing based upon year in stead of fiscal year
# - sql query adapted to select only 'tax-out' move lines
# - extra button to print readable PDF report
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.tools.translate import _
from openerp.osv import fields, osv
from openerp.report import report_sxw
class vat_listing_clients(osv.osv_memory):
    """Transient model holding one row per client shown in the VAT listing.

    Rows are created by `partner.vat.get_partner` and consumed by the
    `partner.vat.list` wizard (XML/PDF generation).
    """
    _name = 'vat.listing.clients'
    _columns = {
        'name': fields.char('Client Name'),
        'vat': fields.char('VAT'),
        'turnover': fields.float('Base Amount'),
        'vat_amount': fields.float('VAT Amount'),
    }
class partner_vat(osv.osv_memory):
    """ Vat Listing """
    _name = "partner.vat"

    def get_partner(self, cr, uid, ids, context=None):
        """Collect Belgian clients whose yearly turnover reaches the limit.

        For the selected calendar year, sums per-partner turnover and VAT
        amounts over the matching account periods, creates one
        `vat.listing.clients` record per qualifying partner and opens the
        `partner.vat.list` wizard with those clients preselected in context.

        Raises osv.except_osv when no periods, no Belgian VAT partners or
        no qualifying rows exist.
        """
        obj_period = self.pool.get('account.period')
        obj_partner = self.pool.get('res.partner')
        obj_vat_lclient = self.pool.get('vat.listing.clients')
        obj_model_data = self.pool.get('ir.model.data')
        obj_module = self.pool.get('ir.module.module')
        data = self.read(cr, uid, ids)[0]
        year = data['year']
        # The listing is per calendar year, not per fiscal year.
        date_start = year + '-01-01'
        date_stop = year + '-12-31'
        # Prefer a company forced through the context, else the user's company.
        if context.get('company_id', False):
            company_id = context['company_id']
        else:
            company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        period_ids = obj_period.search(cr, uid, [('date_start' ,'>=', date_start), ('date_stop','<=',date_stop), ('company_id','=',company_id)])
        if not period_ids:
            raise osv.except_osv(_('Insufficient Data!'), _('No data for the selected year.'))
        partners = []
        # Only partners subjected to VAT with a Belgian VAT number qualify.
        partner_ids = obj_partner.search(cr, uid, [('vat_subjected', '!=', False), ('vat','ilike','BE%')], context=context)
        if not partner_ids:
            raise osv.except_osv(_('Error'),_('No belgium contact with a VAT number in your database.'))
        # sub1 sums the taxable base per partner (code '49' lines are credit
        # notes, hence negated); sub2 sums the VAT amount (code '64' negated).
        cr.execute("""SELECT sub1.partner_id, sub1.name, sub1.vat, sub1.turnover, sub2.vat_amount
                 FROM (SELECT l.partner_id, p.name, p.vat, SUM(CASE WHEN c.code ='49' THEN -l.tax_amount ELSE l.tax_amount END) as turnover
                       FROM account_move_line l
                       LEFT JOIN res_partner p ON l.partner_id = p.id
                       LEFT JOIN account_tax_code c ON l.tax_code_id = c.id
                       WHERE c.code IN ('00','01','02','03','45','49')
                       AND l.partner_id IN %s
                       AND l.period_id IN %s
                       GROUP BY l.partner_id, p.name, p.vat) AS sub1
                 LEFT JOIN (SELECT l2.partner_id, SUM(CASE WHEN c2.code ='64' THEN -l2.tax_amount ELSE l2.tax_amount END) as vat_amount
                      FROM account_move_line l2
                      LEFT JOIN account_tax_code c2 ON l2.tax_code_id = c2.id
                      WHERE c2.code IN ('54','64')
                      AND l2.partner_id IN %s
                      AND l2.period_id IN %s
                      GROUP BY l2.partner_id) AS sub2 ON sub1.partner_id = sub2.partner_id
                 """,(tuple(partner_ids),tuple(period_ids),tuple(partner_ids),tuple(period_ids)))
        for record in cr.dictfetchall():
            record['vat'] = record['vat'].replace(' ','').upper()
            # Keep only clients at or above the configured turnover limit.
            if record['turnover'] >= data['limit_amount']:
                id_client = obj_vat_lclient.create(cr, uid, record, context=context)
                partners.append(id_client)
        if not partners:
            raise osv.except_osv(_('Insufficient Data!'), _('No data found for the selected year.'))
        context.update({'partner_ids': partners, 'year': data['year'], 'limit_amount': data['limit_amount']})
        model_data_ids = obj_model_data.search(cr, uid, [('model','=','ir.ui.view'), ('name','=','view_vat_listing')])
        resource_id = obj_model_data.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
        # Open the second wizard step with the selected clients in context.
        return {
            'name': _('Vat Listing'),
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'partner.vat.list',
            'views': [(resource_id,'form')],
            'context': context,
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

    _columns = {
        # Calendar year covered by the listing (defaults to last year).
        'year': fields.char('Year', size=4, required=True),
        # Minimum yearly turnover for a client to appear in the listing.
        'limit_amount': fields.integer('Limit Amount', required=True),
    }
    _defaults={
        'year': lambda *a: str(int(time.strftime('%Y'))-1),
        'limit_amount': 250,
    }
class partner_vat_list(osv.osv_memory):
    """ Partner Vat Listing """
    _name = "partner.vat.list"
    _columns = {
        'partner_ids': fields.many2many('vat.listing.clients', 'vat_partner_rel', 'vat_id', 'partner_id', 'Clients', help='You can remove clients/partners which you do not want to show in xml file'),
        'name': fields.char('File Name'),
        'file_save' : fields.binary('Save File', readonly=True),
        'comments': fields.text('Comments'),
    }

    def _get_partners(self, cr, uid, context=None):
        # Default the client list to the ids pushed into the context by
        # `partner.vat.get_partner`.
        return context.get('partner_ids', [])

    _defaults={
        'partner_ids': _get_partners,
    }

    def _get_datas(self, cr, uid, ids, context=None):
        """Normalise the wizard's client rows into formatted dicts.

        Returns a list of dicts (one per client) with sequence number,
        VAT number, amounts formatted to 2 decimals, plus the running
        totals; the last element carries the final sums.
        """
        obj_vat_lclient = self.pool.get('vat.listing.clients')
        datas = []
        data = self.read(cr, uid, ids)[0]
        for partner in data['partner_ids']:
            # Rows may arrive either as (0, 0, values) o2m/m2m commands or as
            # plain record ids, depending on how the wizard was populated.
            if isinstance(partner, list) and partner:
                datas.append(partner[2])
            else:
                client_data = obj_vat_lclient.read(cr, uid, partner, context=context)
                datas.append(client_data)
        client_datas = []
        seq = 0
        sum_tax = 0.00
        sum_turnover = 0.00
        amount_data = {}
        for line in datas:
            if not line:
                continue
            seq += 1
            sum_tax += line['vat_amount']
            sum_turnover += line['turnover']
            vat = line['vat'].replace(' ','').upper()
            amount_data ={
                'seq': str(seq),
                'vat': vat,
                # VAT number without the 2-letter country prefix.
                'only_vat': vat[2:],
                'turnover': '%.2f' %line['turnover'],
                'vat_amount': '%.2f' %line['vat_amount'],
                'sum_tax': '%.2f' %sum_tax,
                'sum_turnover': '%.2f' %sum_turnover,
                'partner_name': line['name'],
            }
            client_datas += [amount_data]
        return client_datas

    def create_xml(self, cr, uid, ids, context=None):
        """Generate the INTERVAT client-listing XML and store it on the wizard.

        Builds the <ClientListingConsignment> document from the company's
        invoice address and the client rows of `_get_datas`, base64-encodes
        it into `file_save` and reopens the wizard on the result view.
        """
        obj_sequence = self.pool.get('ir.sequence')
        obj_users = self.pool.get('res.users')
        obj_partner = self.pool.get('res.partner')
        obj_model_data = self.pool.get('ir.model.data')
        seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
        obj_cmpny = obj_users.browse(cr, uid, uid, context=context).company_id
        company_vat = obj_cmpny.partner_id.vat
        if not company_vat:
            raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with the company.'))
        company_vat = company_vat.replace(' ','').upper()
        SenderId = company_vat[2:]
        issued_by = company_vat[:2]
        seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
        # Declarant reference: VAT digits + last 4 digits of the sequence.
        dnum = company_vat[2:] + seq_declarantnum[-4:]
        street = city = country = ''
        addr = obj_partner.address_get(cr, uid, [obj_cmpny.partner_id.id], ['invoice'])
        # NOTE(review): `phone`, `email` and `name` are only assigned inside
        # this branch; with no invoice address the `if not email` test below
        # would raise NameError -- confirm whether that path can occur.
        if addr.get('invoice',False):
            ads = obj_partner.browse(cr, uid, [addr['invoice']], context=context)[0]
            phone = ads.phone and ads.phone.replace(' ','') or ''
            email = ads.email or ''
            name = ads.name or ''
            city = ads.city or ''
            zip = obj_partner.browse(cr, uid, ads.id, context=context).zip or ''
            if not city:
                city = ''
            if ads.street:
                street = ads.street
            if ads.street2:
                street += ' ' + ads.street2
            if ads.country_id:
                country = ads.country_id.code
        data = self.read(cr, uid, ids)[0]
        comp_name = obj_cmpny.name
        if not email:
            raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
        if not phone:
            raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
        annual_listing_data = {
            'issued_by': issued_by,
            'company_vat': company_vat,
            'comp_name': comp_name,
            'street': street,
            'zip': zip,
            'city': city,
            'country': country,
            'email': email,
            'phone': phone,
            'SenderId': SenderId,
            'period': context['year'],
            'comments': data['comments'] or ''
        }
        data_file = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:ClientListingConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/ClientListingConsignment" ClientListingsNbr="1">
    <ns2:Representative>
        <RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(SenderId)s</RepresentativeID>
        <Name>%(comp_name)s</Name>
        <Street>%(street)s</Street>
        <PostCode>%(zip)s</PostCode>
        <City>%(city)s</City>"""
        # CountryCode is optional in the schema: emit it only when known.
        if annual_listing_data['country']:
            data_file +="\n\t\t<CountryCode>%(country)s</CountryCode>"
        data_file += """
        <EmailAddress>%(email)s</EmailAddress>
        <Phone>%(phone)s</Phone>
    </ns2:Representative>"""
        data_file = data_file % annual_listing_data
        data_comp = """
        <ns2:Declarant>
            <VATNumber>%(SenderId)s</VATNumber>
            <Name>%(comp_name)s</Name>
            <Street>%(street)s</Street>
            <PostCode>%(zip)s</PostCode>
            <City>%(city)s</City>
            <CountryCode>%(country)s</CountryCode>
            <EmailAddress>%(email)s</EmailAddress>
            <Phone>%(phone)s</Phone>
        </ns2:Declarant>
        <ns2:Period>%(period)s</ns2:Period>
        """ % annual_listing_data
        # Turnover and Farmer tags are not included
        client_datas = self._get_datas(cr, uid, ids, context=context)
        if not client_datas:
            raise osv.except_osv(_('Data Insufficient!'),_('No data available for the client.'))
        data_client_info = ''
        for amount_data in client_datas:
            data_client_info += """
        <ns2:Client SequenceNumber="%(seq)s">
            <ns2:CompanyVATNumber issuedBy="BE">%(only_vat)s</ns2:CompanyVATNumber>
            <ns2:TurnOver>%(turnover)s</ns2:TurnOver>
            <ns2:VATAmount>%(vat_amount)s</ns2:VATAmount>
        </ns2:Client>""" % amount_data
        # The last row of `_get_datas` carries the final totals and count.
        amount_data_begin = client_datas[-1]
        amount_data_begin.update({'dnum':dnum})
        data_begin = """
    <ns2:ClientListing SequenceNumber="1" ClientsNbr="%(seq)s" DeclarantReference="%(dnum)s"
        TurnOverSum="%(sum_turnover)s" VATAmountSum="%(sum_tax)s">
    """ % amount_data_begin
        data_end = """
        <ns2:Comment>%(comments)s</ns2:Comment>
    </ns2:ClientListing>
</ns2:ClientListingConsignment>
""" % annual_listing_data
        data_file += data_begin + data_comp + data_client_info + data_end
        file_save = base64.encodestring(data_file.encode('utf8'))
        self.write(cr, uid, ids, {'file_save':file_save, 'name':'vat_list.xml'}, context=context)
        model_data_ids = obj_model_data.search(cr, uid, [('model','=','ir.ui.view'), ('name','=','view_vat_listing_result')])
        resource_id = obj_model_data.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
        # Reopen the wizard on the "download result" view.
        return {
            'name': _('XML File has been Created'),
            'res_id': ids[0],
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'partner.vat.list',
            'views': [(resource_id,'form')],
            'context': context,
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

    def print_vatlist(self, cr, uid, ids, context=None):
        """Render the human-readable PDF version of the listing."""
        if context is None:
            context = {}
        datas = {'ids': []}
        datas['model'] = 'res.company'
        datas['year'] = context['year']
        datas['limit_amount'] = context['limit_amount']
        datas['client_datas'] = self._get_datas(cr, uid, ids, context=context)
        if not datas['client_datas']:
            raise osv.except_osv(_('Error!'), _('No record to print.'))
        return self.pool['report'].get_action(
            cr, uid, [], 'l10n_be.report_l10nvatpartnerlisting', data=datas, context=context
        )
class partner_vat_listing_print(report_sxw.rml_parse):
    """Report parser exposing the precomputed client rows to the template."""

    def __init__(self, cr, uid, name, context):
        super(partner_vat_listing_print, self).__init__(cr, uid, name, context=context)
        self.localcontext.update( {
            'time': time,
        })

    def set_context(self, objects, data, ids, report_type=None):
        # `client_datas` comes from partner_vat_list._get_datas(); its last
        # row carries the final totals.
        client_datas = data['client_datas']
        self.localcontext.update( {
            'year': data['year'],
            'sum_turnover': client_datas[-1]['sum_turnover'],
            'sum_tax': client_datas[-1]['sum_tax'],
            'client_list': client_datas,
        })
        super(partner_vat_listing_print, self).set_context(objects, data, ids)
class wrapped_vat_listing_print(osv.AbstractModel):
    # Bridges the legacy rml_parse class above into the QWeb `report` engine.
    _name = 'report.l10n_be.report_l10nvatpartnerlisting'
    _inherit = 'report.abstract_report'
    _template = 'l10n_be.report_l10nvatpartnerlisting'
    _wrapped_report_class = partner_vat_listing_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dgzurita/odoo | addons/association/__openerp__.py | 260 | 1700 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cloudera/hue | desktop/core/ext-py/djangorestframework-3.9.4/rest_framework/schemas/generators.py | 3 | 14992 | """
generators.py # Top-down schema generation
See schemas.__init__.py for package overview.
"""
import re
from collections import Counter, OrderedDict
from importlib import import_module
from django.conf import settings
from django.contrib.admindocs.views import simplify_regex
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.utils import six
from rest_framework import exceptions
from rest_framework.compat import (
URLPattern, URLResolver, coreapi, coreschema, get_original_route
)
from rest_framework.request import clone_request
from rest_framework.settings import api_settings
from rest_framework.utils.model_meta import _get_pk
from .utils import is_list_view
def common_path(paths):
    """Return the longest shared leading path of *paths*, with a leading '/'."""
    segment_lists = [p.strip('/').split('/') for p in paths]
    lo, hi = min(segment_lists), max(segment_lists)
    # If the lexicographic min and max agree on a prefix, every list does.
    shared = lo
    for index in range(len(lo)):
        if lo[index] != hi[index]:
            shared = lo[:index]
            break
    return '/' + '/'.join(shared)
def get_pk_name(model):
    """Return the name of the model's concrete primary-key field."""
    concrete_meta = model._meta.concrete_model._meta
    return _get_pk(concrete_meta).name
def is_api_view(callback):
    """
    Return `True` if the given view callback is a REST framework view/viewset.
    """
    # Avoid import cycle on APIView
    from rest_framework.views import APIView
    view_class = getattr(callback, 'cls', None)
    if view_class is None:
        return False
    return issubclass(view_class, APIView)
INSERT_INTO_COLLISION_FMT = """
Schema Naming Collision.
coreapi.Link for URL path {value_url} cannot be inserted into schema.
Position conflicts with coreapi.Link for URL path {target_url}.
Attempted to insert link with keys: {keys}.
Adjust URLs to avoid naming collision or override `SchemaGenerator.get_keys()`
to customise schema structure.
"""
class LinkNode(OrderedDict):
    """One node of the schema tree.

    Maps child names to sub-nodes/links, while `links` holds the
    (key, link) pairs pending insertion at this level and
    `methods_counter` tracks suffixes handed out for colliding keys.
    """

    def __init__(self):
        self.links = []
        self.methods_counter = Counter()
        super(LinkNode, self).__init__()

    def get_available_key(self, preferred_key):
        """Return *preferred_key*, or the first free '<key>_<n>' variant."""
        if preferred_key not in self:
            return preferred_key
        while True:
            suffix = self.methods_counter[preferred_key]
            self.methods_counter[preferred_key] += 1
            candidate = '{}_{}'.format(preferred_key, suffix)
            if candidate not in self:
                return candidate
def insert_into(target, keys, value):
    """
    Nested dictionary insertion.

    >>> example = {}
    >>> insert_into(example, ['a', 'b', 'c'], 123)
    >>> example
    LinkNode({'a': LinkNode({'b': LinkNode({'c': LinkNode(links=[123])}}})))
    """
    # Walk (creating as needed) intermediate LinkNodes for all but the last key.
    for key in keys[:-1]:
        if key not in target:
            target[key] = LinkNode()
        target = target[key]
    try:
        target.links.append((keys[-1], value))
    except TypeError:
        # `target` is not a LinkNode here -- another link already occupies
        # this position -- so report the naming collision explicitly.
        msg = INSERT_INTO_COLLISION_FMT.format(
            value_url=value.url,
            target_url=target.url,
            keys=keys
        )
        raise ValueError(msg)
def distribute_links(obj):
    """Depth-first: move every node's pending links into its own mapping."""
    for child in obj.values():
        distribute_links(child)
    for preferred_key, link in obj.links:
        obj[obj.get_available_key(preferred_key)] = link
def is_custom_action(action):
    """True when *action* is not one of the standard viewset actions."""
    default_actions = (
        'retrieve', 'list', 'create', 'update', 'partial_update', 'destroy',
    )
    return action not in default_actions
def endpoint_ordering(endpoint):
    """Sort key: group by path, then order HTTP methods CRUD-style."""
    path, method, _callback = endpoint
    priorities = {'GET': 0, 'POST': 1, 'PUT': 2, 'PATCH': 3, 'DELETE': 4}
    # Unknown methods sort after the well-known ones.
    return (path, priorities.get(method, 5))
# Matches Django 2.0 path-converter components such as '<int:pk>';
# 'converter' is optional, 'parameter' captures the variable name.
_PATH_PARAMETER_COMPONENT_RE = re.compile(
    r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>\w+)>'
)
class EndpointEnumerator(object):
    """
    A class to determine the available API endpoints that a project exposes.
    """
    def __init__(self, patterns=None, urlconf=None):
        if patterns is None:
            if urlconf is None:
                # Use the default Django URL conf
                urlconf = settings.ROOT_URLCONF
            # Load the given URLconf module
            if isinstance(urlconf, six.string_types):
                urls = import_module(urlconf)
            else:
                urls = urlconf
            patterns = urls.urlpatterns
        self.patterns = patterns

    def get_api_endpoints(self, patterns=None, prefix=''):
        """
        Return a list of all available API endpoints by inspecting the URL conf.

        Each endpoint is a (path, method, callback) triple, sorted by
        `endpoint_ordering`.
        """
        if patterns is None:
            patterns = self.patterns
        api_endpoints = []
        for pattern in patterns:
            path_regex = prefix + get_original_route(pattern)
            if isinstance(pattern, URLPattern):
                # Leaf URL: one endpoint per allowed HTTP method.
                path = self.get_path_from_regex(path_regex)
                callback = pattern.callback
                if self.should_include_endpoint(path, callback):
                    for method in self.get_allowed_methods(callback):
                        endpoint = (path, method, callback)
                        api_endpoints.append(endpoint)
            elif isinstance(pattern, URLResolver):
                # Included URLconf: recurse with the accumulated prefix.
                nested_endpoints = self.get_api_endpoints(
                    patterns=pattern.url_patterns,
                    prefix=path_regex
                )
                api_endpoints.extend(nested_endpoints)
        api_endpoints = sorted(api_endpoints, key=endpoint_ordering)
        return api_endpoints

    def get_path_from_regex(self, path_regex):
        """
        Given a URL conf regex, return a URI template string.
        """
        path = simplify_regex(path_regex)
        # Strip Django 2.0 convertors as they are incompatible with uritemplate format
        path = re.sub(_PATH_PARAMETER_COMPONENT_RE, r'{\g<parameter>}', path)
        return path

    def should_include_endpoint(self, path, callback):
        """
        Return `True` if the given endpoint should be included.
        """
        if not is_api_view(callback):
            return False  # Ignore anything except REST framework views.
        if callback.cls.schema is None:
            # Schema generation disabled at the class level.
            return False
        if 'schema' in callback.initkwargs:
            if callback.initkwargs['schema'] is None:
                # ...or disabled per-instance via `.as_view(schema=None)`.
                return False
        if path.endswith('.{format}') or path.endswith('.{format}/'):
            return False  # Ignore .json style URLs.
        return True

    def get_allowed_methods(self, callback):
        """
        Return a list of the valid HTTP methods for this endpoint.
        """
        if hasattr(callback, 'actions'):
            # ViewSet: only methods that are both mapped and allowed.
            actions = set(callback.actions)
            http_method_names = set(callback.cls.http_method_names)
            methods = [method.upper() for method in actions & http_method_names]
        else:
            methods = callback.cls().allowed_methods
        return [method for method in methods if method not in ('OPTIONS', 'HEAD')]
class SchemaGenerator(object):
    """Top-down generator producing a `coreapi.Document` for a URLconf."""
    # Map HTTP methods onto actions.
    default_mapping = {
        'get': 'retrieve',
        'post': 'create',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy',
    }
    endpoint_inspector_cls = EndpointEnumerator

    # Map the method names we use for viewset actions onto external schema names.
    # These give us names that are more suitable for the external representation.
    # Set by 'SCHEMA_COERCE_METHOD_NAMES'.
    coerce_method_names = None

    # 'pk' isn't great as an externally exposed name for an identifier,
    # so by default we prefer to use the actual model field name for schemas.
    # Set by 'SCHEMA_COERCE_PATH_PK'.
    coerce_path_pk = None

    def __init__(self, title=None, url=None, description=None, patterns=None, urlconf=None):
        """Configure the generator; coercion flags come from api_settings."""
        assert coreapi, '`coreapi` must be installed for schema support.'
        assert coreschema, '`coreschema` must be installed for schema support.'
        if url and not url.endswith('/'):
            url += '/'
        self.coerce_method_names = api_settings.SCHEMA_COERCE_METHOD_NAMES
        self.coerce_path_pk = api_settings.SCHEMA_COERCE_PATH_PK
        self.patterns = patterns
        self.urlconf = urlconf
        self.title = title
        self.description = description
        self.url = url
        # Endpoint list is computed lazily on the first `get_schema()` call.
        self.endpoints = None

    def get_schema(self, request=None, public=False):
        """
        Generate a `coreapi.Document` representing the API schema.

        With `public=True` the request is ignored, so no per-user
        permission filtering is applied.
        """
        if self.endpoints is None:
            inspector = self.endpoint_inspector_cls(self.patterns, self.urlconf)
            self.endpoints = inspector.get_api_endpoints()
        links = self.get_links(None if public else request)
        if not links:
            return None
        url = self.url
        if not url and request is not None:
            url = request.build_absolute_uri()
        distribute_links(links)
        return coreapi.Document(
            title=self.title, description=self.description,
            url=url, content=links
        )

    def get_links(self, request=None):
        """
        Return a dictionary containing all the links that should be
        included in the API schema.
        """
        links = LinkNode()
        # Generate (path, method, view) given (path, method, callback).
        paths = []
        view_endpoints = []
        for path, method, callback in self.endpoints:
            view = self.create_view(callback, method, request)
            path = self.coerce_path(path, method, view)
            paths.append(path)
            view_endpoints.append((path, method, view))
        # Only generate the path prefix for paths that will be included
        if not paths:
            return None
        prefix = self.determine_path_prefix(paths)
        for path, method, view in view_endpoints:
            if not self.has_view_permissions(path, method, view):
                continue
            link = view.schema.get_link(path, method, base_url=self.url)
            # Key the link by its path relative to the common prefix.
            subpath = path[len(prefix):]
            keys = self.get_keys(subpath, method, view)
            insert_into(links, keys, link)
        return links

    # Methods used when we generate a view instance from the raw callback...

    def determine_path_prefix(self, paths):
        """
        Given a list of all paths, return the common prefix which should be
        discounted when generating a schema structure.

        This will be the longest common string that does not include that last
        component of the URL, or the last component before a path parameter.

        For example:

        /api/v1/users/
        /api/v1/users/{pk}/

        The path prefix is '/api/v1/'
        """
        prefixes = []
        for path in paths:
            components = path.strip('/').split('/')
            initial_components = []
            for component in components:
                if '{' in component:
                    break
                initial_components.append(component)
            prefix = '/'.join(initial_components[:-1])
            if not prefix:
                # We can just break early in the case that there's at least
                # one URL that doesn't have a path prefix.
                return '/'
            prefixes.append('/' + prefix + '/')
        return common_path(prefixes)

    def create_view(self, callback, method, request=None):
        """
        Given a callback, return an actual view instance.
        """
        view = callback.cls(**getattr(callback, 'initkwargs', {}))
        view.args = ()
        view.kwargs = {}
        view.format_kwarg = None
        view.request = None
        view.action_map = getattr(callback, 'actions', None)

        actions = getattr(callback, 'actions', None)
        if actions is not None:
            if method == 'OPTIONS':
                view.action = 'metadata'
            else:
                view.action = actions.get(method.lower())

        if request is not None:
            view.request = clone_request(request, method)

        return view

    def has_view_permissions(self, path, method, view):
        """
        Return `True` if the incoming request has the correct view permissions.
        """
        if view.request is None:
            # Public schema generation: no request, no filtering.
            return True

        try:
            view.check_permissions(view.request)
        except (exceptions.APIException, Http404, PermissionDenied):
            return False
        return True

    def coerce_path(self, path, method, view):
        """
        Coerce {pk} path arguments into the name of the model field,
        where possible. This is cleaner for an external representation.
        (Ie. "this is an identifier", not "this is a database primary key")
        """
        if not self.coerce_path_pk or '{pk}' not in path:
            return path
        model = getattr(getattr(view, 'queryset', None), 'model', None)
        if model:
            field_name = get_pk_name(model)
        else:
            field_name = 'id'
        return path.replace('{pk}', '{%s}' % field_name)

    # Method for generating the link layout....

    def get_keys(self, subpath, method, view):
        """
        Return a list of keys that should be used to layout a link within
        the schema document.

        /users/                   ("users", "list"), ("users", "create")
        /users/{pk}/              ("users", "read"), ("users", "update"), ("users", "delete")
        /users/enabled/           ("users", "enabled")  # custom viewset list action
        /users/{pk}/star/         ("users", "star")     # custom viewset detail action
        /users/{pk}/groups/       ("users", "groups", "list"), ("users", "groups", "create")
        /users/{pk}/groups/{pk}/  ("users", "groups", "read"), ("users", "groups", "update"), ("users", "groups", "delete")
        """
        if hasattr(view, 'action'):
            # Viewsets have explicitly named actions.
            action = view.action
        else:
            # Views have no associated action, so we determine one from the method.
            if is_list_view(subpath, method, view):
                action = 'list'
            else:
                action = self.default_mapping[method.lower()]

        named_path_components = [
            component for component
            in subpath.strip('/').split('/')
            if '{' not in component
        ]

        if is_custom_action(action):
            # Custom action, eg "/users/{pk}/activate/", "/users/active/"
            if len(view.action_map) > 1:
                action = self.default_mapping[method.lower()]
                if action in self.coerce_method_names:
                    action = self.coerce_method_names[action]
                return named_path_components + [action]
            else:
                return named_path_components[:-1] + [action]

        if action in self.coerce_method_names:
            action = self.coerce_method_names[action]

        # Default action, eg "/users/", "/users/{pk}/"
        return named_path_components + [action]
| apache-2.0 |
tloredo/batse5bp | doc/sphinxext/numpydoc.py | 64 | 4058 | """
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined otherwise.
.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
"""
import os, re, pydoc
from docscrape_sphinx import get_doc_object, SphinxDocString
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    """Rewrite numpydoc-style docstrings in place for Sphinx.

    Mutates `lines` (the docstring split into lines).  The mutable default
    `reference_offset` is deliberate: it is the cross-call counter used to
    renumber references so they stay unique across the whole build.
    """
    if what == 'module':
        # Strip top title
        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
                              re.I|re.S)
        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
    else:
        # Re-render the docstring through the numpydoc formatter.
        doc = get_doc_object(obj, what, "\n".join(lines))
        lines[:] = str(doc).split("\n")
        if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
           obj.__name__:
            if hasattr(obj, '__module__'):
                v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__))
            else:
                v = dict(full_name=obj.__name__)
            lines += ['', '.. htmlonly::', '']
            lines += [' %s' % x for x in
                      (app.config.numpydoc_edit_link % v).split("\n")]
    # replace reference numbers so that there are no duplicates
    references = []
    for l in lines:
        l = l.strip()
        if l.startswith('.. ['):
            try:
                references.append(int(l[len('.. ['):l.index(']')]))
            except ValueError:
                print "WARNING: invalid reference in %s docstring" % name
    # Start renaming from the biggest number, otherwise we may
    # overwrite references.
    references.sort()
    if references:
        for i, line in enumerate(lines):
            for r in references:
                new_r = reference_offset[0] + r
                lines[i] = lines[i].replace('[%d]_' % r,
                                            '[%d]_' % new_r)
                lines[i] = lines[i].replace('.. [%d]' % r,
                                            '.. [%d]' % new_r)
    reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
    """Return (signature, return_annotation) taken from the docstring.

    Falls through (returns None implicitly) when the object has no usable
    numpydoc 'Signature' section, letting autodoc use its own inspection.
    """
    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
        'initializes x; see ' in pydoc.getdoc(obj.__init__)):
        return '', ''
    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
    if not hasattr(obj, '__doc__'): return
    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Keep only the argument list; drop any leading qualified name.
        sig = re.sub("^[^(]*", "", doc['Signature'])
        return sig, ''
def initialize(app):
    """Wire `mangle_signature` into Sphinx.

    Recent Sphinx exposes the 'autodoc-process-signature' event; on very
    old versions (< 0.5) `app.connect` fails, in which case we fall back
    to monkey-patching sphinx.ext.autodoc directly.
    """
    try:
        app.connect('autodoc-process-signature', mangle_signature)
    except Exception:
        # Was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; the fallback behaviour is unchanged.
        monkeypatch_sphinx_ext_autodoc()
def setup(app, get_doc_object_=get_doc_object):
    """Sphinx extension entry point.

    Callers may substitute their own docstring-object factory; it is stored
    in the module-level `get_doc_object` global used by `mangle_docstrings`.
    """
    global get_doc_object
    get_doc_object = get_doc_object_
    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('builder-inited', initialize)
    app.add_config_value('numpydoc_edit_link', None, True)
#------------------------------------------------------------------------------
# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
#------------------------------------------------------------------------------
def monkeypatch_sphinx_ext_autodoc():
    """Fallback for Sphinx < 0.5: replace autodoc's `format_signature`.

    The original function is kept in the module global
    `_original_format_signature` so `our_format_signature` can delegate.
    """
    global _original_format_signature
    import sphinx.ext.autodoc
    if sphinx.ext.autodoc.format_signature is our_format_signature:
        return  # already patched
    print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
    _original_format_signature = sphinx.ext.autodoc.format_signature
    sphinx.ext.autodoc.format_signature = our_format_signature
def our_format_signature(what, obj):
    """Patched `format_signature`: prefer the numpydoc-derived signature,
    falling back to the original autodoc implementation."""
    r = mangle_signature(None, what, None, obj, None, None, None)
    if r is not None:
        return r[0]
    else:
        return _original_format_signature(what, obj)
| bsd-2-clause |
solome/jyp | misc/virtenv/lib/python2.7/site-packages/setuptools/extension.py | 165 | 1731 | import sys
import re
import functools
import distutils.core
import distutils.extension
from setuptools.dist import _get_unpatched
# Grab the pristine distutils Extension class (before any earlier patching).
_Extension = _get_unpatched(distutils.core.Extension)
def have_pyrex():
    """Return True when a Cython or Pyrex `build_ext` is importable."""
    candidate_modules = (
        'Cython.Distutils.build_ext',
        'Pyrex.Distutils.build_ext',
    )
    for module_name in candidate_modules:
        try:
            module = __import__(module_name, fromlist=['build_ext'])
            module.build_ext  # the attribute access must also succeed
        except Exception:
            continue
        return True
    return False
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""

    def __init__(self, *args, **kw):
        _Extension.__init__(self, *args, **kw)
        self._convert_pyx_sources_to_lang()

    def _convert_pyx_sources_to_lang(self):
        """
        Replace sources with .pyx extensions to sources with the target
        language extension. This mechanism allows language authors to supply
        pre-converted sources but to prefer the .pyx sources.
        """
        if have_pyrex():
            # the build has Cython, so allow it to compile the .pyx files
            return
        lang = self.language or ''
        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
        # NOTE(review): the '.' in the pattern is an unescaped regex dot, so
        # any character before 'pyx' at end-of-string matches -- confirm
        # whether r'\.pyx$' was intended.
        sub = functools.partial(re.sub, '.pyx$', target_ext)
        self.sources = list(map(sub, self.sources))
# Marker subclass: identical behavior to Extension; presumably the distinct
# type is what downstream build commands check to treat it as a library --
# TODO confirm against setuptools' build_ext command.
class Library(Extension):
    """Just like a regular Extension, but built as a library instead"""
# Monkeypatch distutils so code constructing extensions through the
# distutils entry points gets the pyx-aware subclass defined above.
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
# If build_ext was imported before this module, it may have bound the
# original Extension at module level; patch that reference too.
if 'distutils.command.build_ext' in sys.modules:
    sys.modules['distutils.command.build_ext'].Extension = Extension
| mpl-2.0 |
Curso-OpenShift/Formulario | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/contrib/flatpages/migrations/0001_initial.py | 308 | 1775 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for django.contrib.flatpages: creates FlatPage."""

    # FlatPage has a many-to-many to sites.Site (see 'sites' field below),
    # so the sites app's initial migration must be applied first.
    dependencies = [
        ('sites', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='FlatPage',
            fields=[
                # Standard auto-incrementing primary key.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
                ('title', models.CharField(max_length=200, verbose_name='title')),
                ('content', models.TextField(verbose_name='content', blank=True)),
                ('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
                ('template_name', models.CharField(
                    help_text=(
                        "Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use "
                        "'flatpages/default.html'."
                    ), max_length=70, verbose_name='template name', blank=True
                )),
                ('registration_required', models.BooleanField(
                    default=False, help_text='If this is checked, only logged-in users will be able to view the page.',
                    verbose_name='registration required'
                )),
                ('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
            ],
            options={
                'ordering': ('url',),
                'db_table': 'django_flatpage',
                'verbose_name': 'flat page',
                'verbose_name_plural': 'flat pages',
            },
            bases=(models.Model,),
        ),
    ]
| gpl-3.0 |
GoogleCloudPlatform/professional-services | examples/bq-email-exports/export_query_results_function/main.py | 1 | 2698 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Function for exporting BigQuery results from an anonymous table to GCS.
Triggered after BigQuery query is complete.
"""
import base64
import json
import logging
import os
import google.api_core.client_info
from google.cloud import bigquery
# Custom user agent so requests from this example are identifiable in
# Google API usage/request logs.
CLIENT_INFO = google.api_core.client_info.ClientInfo(
    user_agent="google-pso-example/bq-email-exports")
def main(event, context):
    """Entrypoint for Cloud Function.

    Triggered by a Pub/Sub message carrying the finished BigQuery Data
    Transfer Service run. On success, starts a BigQuery extract job that
    exports the run's destination table to GCS; on upstream failure, logs
    the error and does nothing else.

    Args:
        event: Pub/Sub event; event['data'] is the base64-encoded JSON
            transfer-run object.
        context: Cloud Functions event metadata (unused).
    """
    data = base64.b64decode(event['data'])
    upstream_bq_dts_obj = json.loads(data)
    error = upstream_bq_dts_obj.get('errorStatus')
    if error:
        logging.error(
            RuntimeError(f"Error in upstream query job: {error['message']}."))
    else:
        project_id = get_env('PROJECT_ID')
        dataset_id = upstream_bq_dts_obj['destinationDatasetId']
        table_name = upstream_bq_dts_obj['params'][
            'destination_table_name_template']
        schedule_time = upstream_bq_dts_obj['scheduleTime']
        bq_client = bigquery.Client(client_info=CLIENT_INFO)
        dataset_ref = bigquery.DatasetReference.from_string(
            dataset_id, default_project=project_id)
        table_ref = dataset_ref.table(table_name)
        destination_uri = get_destination_uri(schedule_time)
        extract_config = bigquery.ExtractJobConfig(
            compression=get_env('COMPRESSION'),
            destination_format=get_env('DEST_FMT'),
            # BUG FIX: keyword was misspelled 'field_delimeter'; the real
            # ExtractJobConfig property is 'field_delimiter', so the
            # configured delimiter was silently ignored.
            field_delimiter=get_env('FIELD_DELIMITER'),
            use_avro_logical_types=get_env('USE_AVRO_TYPES'))
        # extract_table is asynchronous; the function returns without
        # waiting for the export job to finish.
        bq_client.extract_table(table_ref,
                                destination_uri,
                                job_id_prefix="email_export_",
                                job_config=extract_config)
        print(
            f"Exporting {project_id}:{dataset_id}.{table_name} to {destination_uri}"
        )
def get_destination_uri(schedule_time):
    """Build the GCS destination URI for the export.

    Embeds the transfer-run schedule time in the object path so
    successive exports do not overwrite each other.
    """
    bucket = get_env('BUCKET_NAME')
    object_name = get_env('OBJECT_NAME')
    return "gs://{}/{}/{}".format(bucket, schedule_time, object_name)
def get_env(name):
    """Return the required environment variable *name*.

    Raises KeyError when the variable is unset, failing fast on
    misconfiguration.
    """
    value = os.environ[name]
    return value
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.