repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
brayoh/bucket-list-api | tests/base.py | 1 | 1997 | import unittest
import json
from app import create_app, db
from app.models import User
class Base(unittest.TestCase):
    """Shared fixture for the API tests.

    Creates a test app and client, registers a user, and seeds one
    bucketlist containing one item so every test starts from the same
    database state.
    """

    def setUp(self):
        # Build the Flask app in its "testing" configuration and get a
        # client that issues requests without running a real server.
        self.app = create_app("testing")
        self.client = self.app.test_client()
        # JSON body reused for both registration and login.
        self.user = json.dumps({
            "username": "brian",
            "password": "password"
        })
        with self.app.app_context():
            db.create_all()
        # register new user
        self.client.post("/api/v1/auth/register",
                         data=self.user,
                         content_type='application/json')
        # add new bucketlist (set_headers() logs in and supplies the token)
        self.client.post("/api/v1/bucketlists",
                         data=json.dumps({
                             "name": "dare devil",
                             "description": "testing"
                         }),
                         headers=self.set_headers())
        # add a new item to bucketlist 1
        self.client.post("/api/v1/bucketlists/1/items",
                         data=json.dumps({
                             "name": "go bunjee jumping"
                         }),
                         headers=self.set_headers())

    def set_headers(self):
        """ Set headers for Authorization and Content Type. """
        # NOTE(review): this re-registers the user on every call even though
        # setUp() already registered them — presumably the endpoint tolerates
        # duplicate registration; confirm against the register handler.
        self.client.post("/api/v1/auth/register",
                         data=self.user,
                         content_type='application/json')
        response = self.client.post("/api/v1/auth/login",
                                    data=self.user,
                                    content_type='application/json')
        payload = json.loads(response.data.decode())
        # get the token from the response body
        self.token = payload['token']
        return dict({
            'Authorization': self.token,
            'Content-Type': 'application/json',
        })

    def tearDown(self):
        # Drop all tables so each test starts with a clean database.
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
| mit |
gameduell/duell | bin/win/python2.7.9/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
# Flatten the frozenset of space characters into one string and build a
# regex that matches a run of one or more of them.
spaceCharacters = "".join(spaceCharacters)

SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
    """Collapse runs of whitespace in the token stream, except inside
    elements whose text content must be preserved verbatim."""

    # Elements in which whitespace is significant and must not be touched.
    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Number of currently-open space-preserving elements; while this is
        # non-zero every descendant start tag also counts toward the depth.
        depth = 0
        for token in _base.Filter.__iter__(self):
            kind = token["type"]
            if kind == "StartTag":
                if depth or token["name"] in self.spacePreserveElements:
                    depth += 1
            elif kind == "EndTag":
                if depth:
                    depth -= 1
            elif not depth:
                if kind == "SpaceCharacters":
                    # Only rewrite non-empty data so we never introduce
                    # spaces where there were none.
                    if token["data"]:
                        token["data"] = " "
                elif kind == "Characters":
                    token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace every run of space characters in *text* with a single space."""
    return SPACES_REGEX.sub(' ', text)
| bsd-2-clause |
hkernbach/arangodb | 3rdParty/V8/v5.7.492.77/tools/gyp/test/win/gyptest-cl-buffer-security-check.py | 344 | 1612 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
# This test only applies to MSVC toolchains, so it is a no-op elsewhere.
if sys.platform == 'win32':
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

    CHDIR = 'compiler-flags'
    test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
    test.build('buffer-security-check.gyp', chdir=CHDIR)

    def GetDisassemblyOfMain(exe):
        # The standard library uses buffer security checks independent of our
        # buffer security settings, so we extract just our code (i.e. main()) to
        # check against.
        full_path = test.built_file_path(exe, chdir=CHDIR)
        output = test.run_dumpbin('/disasm', full_path)
        result = []
        in_main = False
        for line in output.splitlines():
            # '_main:' is the decorated label dumpbin prints for main().
            if line == '_main:':
                in_main = True
            elif in_main:
                # Disassembly of next function starts (labels begin with '_').
                if line.startswith('_'):
                    break
                result.append(line)
        return '\n'.join(result)

    # Buffer security checks are on by default, make sure security_cookie
    # appears in the disassembly of our code.
    if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
        test.fail_test()

    # Explicitly on.
    if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
        test.fail_test()

    # Explicitly off, shouldn't be a reference to the security cookie.
    if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
        test.fail_test()

    test.pass_test()
| apache-2.0 |
TeamNyx/external_chromium | chrome/common/extensions/docs/examples/apps/hello-python/main.py | 70 | 5222 | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
from google.appengine.api.urlfetch import DownloadError
import oauth2
import urllib
import logging
import os
import time
from django.utils import simplejson
# Configuration for the Chrome Web Store license server. The 'INSERT ...'
# placeholders must be replaced with real credentials before deployment.
CONFIG = {
    'oauth_consumer_key': 'anonymous',
    'oauth_consumer_secret': 'anonymous',
    'license_server': 'https://www.googleapis.com',
    # Filled in with % formatting: server, appid, and the (URL-quoted) userid.
    'license_path': '%(server)s/chromewebstore/v1/licenses/%(appid)s/%(userid)s',
    'oauth_token': 'INSERT OAUTH TOKEN HERE',
    'oauth_token_secret': 'INSERT OAUTH TOKEN SECRET HERE',
    'app_id': 'INSERT APPLICATION ID HERE',
}

# Check to see if the server has been deployed. In the dev server, this
# env variable will start with 'Development', in production, it will start with
# 'Google App Engine'
IS_PRODUCTION = os.environ['SERVER_SOFTWARE'].startswith('Google App Engine')

# Valid access levels that may be returned by the license server.
VALID_ACCESS_LEVELS = ['FREE_TRIAL', 'FULL']
def fetch_license_data(userid):
    """Fetches the license for a given user by making an OAuth signed request
    to the license server.

    Args:
      userid: OpenID of the user you are checking access for.

    Returns:
      The server's response body as text (unparsed JSON).
    """
    # The userid is embedded in the URL path, so it must be percent-encoded.
    url = CONFIG['license_path'] % {
        'server': CONFIG['license_server'],
        'appid': CONFIG['app_id'],
        'userid': urllib.quote_plus(userid),
    }
    oauth_token = oauth2.Token(**{
        'key': CONFIG['oauth_token'],
        'secret': CONFIG['oauth_token_secret']
    })
    oauth_consumer = oauth2.Consumer(**{
        'key': CONFIG['oauth_consumer_key'],
        'secret': CONFIG['oauth_consumer_secret']
    })
    logging.debug('Requesting %s' % url)
    # The client signs the request with the consumer/token credentials above.
    client = oauth2.Client(oauth_consumer, oauth_token)
    resp, content = client.request(url, 'GET')
    logging.debug('Got response code %s, content %s' % (resp, content))
    return content
def parse_license_data(userid):
    """Returns the license for a given user as a structured object.

    Args:
      userid: The OpenID of the user to check.

    Returns:
      A dict with the following keys:
        error:   True if something went wrong, False otherwise.
        message: A descriptive message if error is True.
        access:  One of 'NO', 'FREE_TRIAL', or 'FULL' depending on the access.
    """
    license = {'error': False, 'message': '', 'access': 'NO'}
    json = None
    try:
        response_text = fetch_license_data(userid)
        try:
            logging.debug('Attempting to JSON parse: %s' % response_text)
            json = simplejson.loads(response_text)
            logging.debug('Got license server response: %s' % json)
        except ValueError:
            logging.exception('Could not parse response as JSON: %s' %
                              response_text)
            license['error'] = True
            license['message'] = 'Could not parse the license server response'
    except DownloadError:
        logging.exception('Could not fetch license data')
        license['error'] = True
        license['message'] = 'Could not fetch license data'

    # Bug fix: the original fell through to dereference `json` even when the
    # fetch or the parse had failed, raising NameError instead of returning
    # the error record. Bail out early if no parsed response is available.
    if license['error'] or json is None:
        return license

    # `in` replaces the py2-only dict.has_key(); .get() avoids a KeyError on
    # malformed (but parseable) responses.
    if 'error' in json:
        license['error'] = True
        license['message'] = json['error']['message']
    elif json.get('result') == 'YES' and \
            json.get('accessLevel') in VALID_ACCESS_LEVELS:
        license['access'] = json['accessLevel']
    return license
class MainHandler(webapp.RequestHandler):
    """Request handler class: looks up the current user's license and
    renders the index template with the result (or a login link)."""

    def get(self):
        """Handler for GET requests."""
        user = users.get_current_user()
        if user:
            if IS_PRODUCTION:
                # We should use federated_identity in production, since the license
                # server requires an OpenID
                userid = user.federated_identity()
            else:
                # On the dev server, we won't have access to federated_identity, so
                # just use a default OpenID which will never return YES.
                # If you want to test different response values on the development
                # server, just change this default value (e.g. append '-yes' or
                # '-trial').
                userid = ('https://www.google.com/accounts/o8/id?'
                          'id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            license_data = parse_license_data(userid)
            template_data = {
                'license': license_data,
                'user_name': user.nickname(),
                'user_id': userid,
                'user_logout': users.create_logout_url(self.request.uri),
            }
        else:
            # Force the OpenID login endpoint to be for Google accounts only, since
            # the license server doesn't support any other type of OpenID provider.
            login_url = users.create_login_url(dest_url='/',
                federated_identity='google.com/accounts/o8/id')
            template_data = {
                'user_login': login_url,
            }

        # Render a simple template
        path = os.path.join(os.path.dirname(__file__), 'templates', 'index.html')
        self.response.out.write(template.render(path, template_data))
if __name__ == '__main__':
    # Route the site root to MainHandler and hand the app to the GAE runtime.
    util.run_wsgi_app(
        webapp.WSGIApplication([('/', MainHandler)], debug=False))
| bsd-3-clause |
joebowen/LogMyRocket_API | LogMyRocket/libraries/sys_packages/docutils/docutils/utils/math/__init__.py | 160 | 1683 | # :Id: $Id: __init__.py 7218 2011-11-08 17:42:40Z milde $
# :Author: Guenter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
This is the Docutils (Python Documentation Utilities) "math" sub-package.
It contains various modules for conversion between different math formats
(LaTeX, MathML, HTML).
:math2html: LaTeX math -> HTML conversion from eLyXer
:latex2mathml: LaTeX math -> presentational MathML
:unichar2tex: Unicode character to LaTeX math translation table
:tex2unichar: LaTeX math to Unicode character translation dictionaries
"""
# helpers for Docutils math support
# =================================
def pick_math_environment(code, numbered=False):
    """Return the right math environment to display `code`.

    Content between ``\\begin{`` and ``\\end{`` is ignored; if a LaTeX
    line break (a double backslash) remains at the top level, the
    multi-line ``align`` environment is chosen, otherwise the one-line
    ``equation``. Unless `numbered` is true, the "starred" variant is
    returned so that equation numbering is suppressed.
    """
    # Cut out environment content, keeping only top-level code.
    toplevel_code = ''.join(chunk.split(r'\end{')[-1]
                            for chunk in code.split(r'\begin{'))
    env = 'align' if r'\\' in toplevel_code else 'equation'
    return env if numbered else env + '*'
| gpl-3.0 |
kumarkrishna/sympy | sympy/core/function.py | 8 | 83935 | """
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of functions are composites, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import print_function, division
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic
from .cache import cacheit
from .compatibility import iterable, is_sequence, as_int, ordered
from .core import BasicMeta
from .decorators import _sympifyit
from .expr import Expr, AtomicExpr
from .numbers import Rational, Float
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
from sympy.core.containers import Tuple, Dict
from sympy.core.logic import fuzzy_and
from sympy.core.compatibility import string_types, with_metaclass, range
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import uniq
from sympy.core.evaluate import global_evaluate
import mpmath
import mpmath.libmp as mlib
import inspect
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
"""
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_negative
class PoleError(Exception):
    """Raised when a series expansion cannot be performed around a point
    (e.g. by ``_eval_nseries``/``_eval_aseries`` when the expansion point
    is a pole or the asymptotic expansion is not implemented)."""
    pass
class ArgumentIndexError(ValueError):
    """Raised when ``fdiff`` is asked for a derivative with respect to an
    argument index that the function does not have.

    ``args[0]`` is the function (class or instance), ``args[1]`` the
    offending 1-based argument index.
    """

    def __str__(self):
        fn, idx = self.args[0], self.args[1]
        return ("Invalid operation with argument number %s for Function %s"
                % (idx, fn))
class FunctionClass(ManagedProperties):
    """
    Base class for function classes. FunctionClass is a subclass of type.

    Use Function('<function name>' [ , signature ]) to create
    undefined function classes.
    """
    _new = type.__new__

    def __init__(cls, *args, **kwargs):
        # Infer the allowed argument count from the signature of the class's
        # eval(), when one is defined and does not take *args.
        if hasattr(cls, 'eval'):
            # NOTE(review): inspect.getargspec is deprecated (removed in
            # Python 3.11); this code targets older Python versions.
            evalargspec = inspect.getargspec(cls.eval)
            if evalargspec.varargs:
                evalargs = None
            else:
                evalargs = len(evalargspec.args) - 1  # subtract 1 for cls
                if evalargspec.defaults:
                    # if there are default args then they are optional; the
                    # fewest args will occur when all defaults are used and
                    # the most when none are used (i.e. all args are given)
                    evalargs = tuple(range(
                        evalargs - len(evalargspec.defaults), evalargs + 1))
        else:
            evalargs = None
        # honor kwarg value or class-defined value before using
        # the number of arguments in the eval function (if present)
        nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', evalargs))
        super(FunctionClass, cls).__init__(args, kwargs)

        # Canonicalize nargs here; change to set in nargs.
        if is_sequence(nargs):
            nargs = tuple(ordered(set(nargs)))
        elif nargs is not None:
            nargs = (as_int(nargs),)
        # Stored raw as a tuple (or None); the nargs property below converts
        # it to a set on access.
        cls._nargs = nargs

    @property
    def nargs(self):
        """Return a set of the allowed number of arguments for the function.

        Examples
        ========

        >>> from sympy.core.function import Function
        >>> from sympy.abc import x, y
        >>> f = Function('f')

        If the function can take any number of arguments, the set of whole
        numbers is returned:

        >>> Function('f').nargs
        Naturals0()

        If the function was initialized to accept one or more arguments, a
        corresponding set will be returned:

        >>> Function('f', nargs=1).nargs
        {1}
        >>> Function('f', nargs=(2, 1)).nargs
        {1, 2}

        The undefined function, after application, also has the nargs
        attribute; the actual number of arguments is always available by
        checking the ``args`` attribute:

        >>> f = Function('f')
        >>> f(1).nargs
        Naturals0()
        >>> len(f(1).args)
        1
        """
        from sympy.sets.sets import FiniteSet
        # XXX it would be nice to handle this in __init__ but there are import
        # problems with trying to import FiniteSet there
        return FiniteSet(*self._nargs) if self._nargs else S.Naturals0

    def __repr__(cls):
        # Undefined functions print as their bare name (e.g. ``f``).
        return cls.__name__
class Application(with_metaclass(FunctionClass, Basic)):
    """
    Base class for applied functions.

    Instances of Application represent the result of applying an application of
    any type to any object.
    """

    is_Function = True

    @cacheit
    def __new__(cls, *args, **options):
        from sympy.sets.fancysets import Naturals0
        from sympy.sets.sets import FiniteSet

        args = list(map(sympify, args))
        evaluate = options.pop('evaluate', global_evaluate[0])
        # WildFunction (and anything else like it) may have nargs defined
        # and we throw that value away here
        options.pop('nargs', None)

        if options:
            raise ValueError("Unknown options: %s" % options)

        # Give the class's eval() a chance to return a simplified result;
        # if it returns non-None, that replaces the unevaluated application.
        if evaluate:
            evaluated = cls.eval(*args)
            if evaluated is not None:
                return evaluated

        obj = super(Application, cls).__new__(cls, *args, **options)

        # make nargs uniform here
        try:
            # things passing through here:
            #  - functions subclassed from Function (e.g. myfunc(1).nargs)
            #  - functions like cos(1).nargs
            #  - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
            # Canonicalize nargs here; change to set in nargs.
            if is_sequence(obj.nargs):
                obj.nargs = tuple(ordered(set(obj.nargs)))
            elif obj.nargs is not None:
                obj.nargs = (as_int(obj.nargs),)
            obj.nargs = FiniteSet(*obj.nargs) if obj.nargs is not None \
                else Naturals0()
        except AttributeError:
            # things passing through here:
            #  - WildFunction('f').nargs
            #  - AppliedUndef with no nargs like Function('f')(1).nargs
            obj.nargs = FiniteSet(*obj._nargs) if obj._nargs is not None \
                else Naturals0()
        return obj

    @classmethod
    def eval(cls, *args):
        """
        Returns a canonical form of cls applied to arguments args.

        The eval() method is called when the class cls is about to be
        instantiated and it should return either some simplified instance
        (possible of some other class), or if the class cls should be
        unmodified, return None.

        Examples of eval() for the function "sign"
        ---------------------------------------------

        @classmethod
        def eval(cls, arg):
            if arg is S.NaN:
                return S.NaN
            if arg is S.Zero: return S.Zero
            if arg.is_positive: return S.One
            if arg.is_negative: return S.NegativeOne
            if isinstance(arg, Mul):
                coeff, terms = arg.as_coeff_Mul(rational=True)
                if coeff is not S.One:
                    return cls(coeff) * cls(terms)

        """
        return

    @property
    def func(self):
        return self.__class__

    def _eval_subs(self, old, new):
        # Replace one applied function with another only when the argument
        # count is compatible; otherwise returns None (no substitution).
        if (old.is_Function and new.is_Function and old == self.func and
                len(self.args) in new.nargs):
            return new(*self.args)
class Function(Application, Expr):
    """Base class for applied mathematical functions.

    It also serves as a constructor for undefined function classes.

    Examples
    ========

    First example shows how to use Function as a constructor for undefined
    function classes:

    >>> from sympy import Function, Symbol
    >>> x = Symbol('x')
    >>> f = Function('f')
    >>> g = Function('g')(x)
    >>> f
    f
    >>> f(x)
    f(x)
    >>> g
    g(x)
    >>> f(x).diff(x)
    Derivative(f(x), x)
    >>> g.diff(x)
    Derivative(g(x), x)

    In the following example Function is used as a base class for
    ``my_func`` that represents a mathematical function *my_func*. Suppose
    that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
    goes to *0*, so we want those two simplifications to occur automatically.
    Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
    an implementation that honours those requirements:

    >>> from sympy import Function, S, oo, I, sin
    >>> class my_func(Function):
    ...
    ...     @classmethod
    ...     def eval(cls, x):
    ...         if x.is_Number:
    ...             if x is S.Zero:
    ...                 return S.One
    ...             elif x is S.Infinity:
    ...                 return S.Zero
    ...
    ...     def _eval_is_real(self):
    ...         return self.args[0].is_real
    ...
    >>> x = S('x')
    >>> my_func(0) + sin(0)
    1
    >>> my_func(oo)
    0
    >>> my_func(3.54).n() # Not yet implemented for my_func.
    my_func(3.54)
    >>> my_func(I).is_real
    False

    In order for ``my_func`` to become useful, several other methods would
    need to be implemented. See source code of some of the already
    implemented functions for more complete examples.

    Also, if the function can take more than one argument, then ``nargs``
    must be defined, e.g. if ``my_func`` can take one or two arguments
    then,

    >>> class my_func(Function):
    ...     nargs = (1, 2)
    ...
    >>>
    """

    @property
    def _diff_wrt(self):
        """Allow derivatives wrt functions.

        Examples
        ========

        >>> from sympy import Function, Symbol
        >>> f = Function('f')
        >>> x = Symbol('x')
        >>> f(x)._diff_wrt
        True
        """
        return True

    @cacheit
    def __new__(cls, *args, **options):
        # Handle calls like Function('f')
        if cls is Function:
            return UndefinedFunction(*args, **options)

        n = len(args)
        if n not in cls.nargs:
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.
            temp = ('%(name)s takes %(qual)s %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': cls,
                'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
                'args': min(cls.nargs),
                'plural': 's'*(min(cls.nargs) != 1),
                'given': n})

        evaluate = options.get('evaluate', global_evaluate[0])
        result = super(Function, cls).__new__(cls, *args, **options)
        if not evaluate or not isinstance(result, cls):
            return result

        # If every argument wants evalf (all precisions positive), evaluate
        # numerically at the largest requested precision.
        pr = max(cls._should_evalf(a) for a in result.args)
        pr2 = min(cls._should_evalf(a) for a in result.args)
        if pr2 > 0:
            return result.evalf(mlib.libmpf.prec_to_dps(pr))
        return result

    @classmethod
    def _should_evalf(cls, arg):
        """
        Decide if the function should automatically evalf().

        By default (in this implementation), this happens if (and only if) the
        ARG is a floating point number.

        This function is used by __new__.

        Returns the precision to evalf to, or -1 if it shouldn't evalf.
        """
        from sympy.core.symbol import Wild
        if arg.is_Float:
            return arg._prec
        if not arg.is_Add:
            return -1
        # Don't use as_real_imag() here, that's too much work
        a, b = Wild('a'), Wild('b')
        m = arg.match(a + b*S.ImaginaryUnit)
        if not m or not (m[a].is_Float or m[b].is_Float):
            return -1
        l = [m[i]._prec for i in m if m[i].is_Float]
        l.append(-1)
        return max(l)

    @classmethod
    def class_key(cls):
        """Return the sort key used to order function classes in printing
        and canonical ordering; well-known functions get fixed indices."""
        from sympy.sets.fancysets import Naturals0
        funcs = {
            'exp': 10,
            'log': 11,
            'sin': 20,
            'cos': 21,
            'tan': 22,
            'cot': 23,
            'sinh': 30,
            'cosh': 31,
            'tanh': 32,
            'coth': 33,
            'conjugate': 40,
            're': 41,
            'im': 42,
            'arg': 43,
        }
        name = cls.__name__

        try:
            i = funcs[name]
        except KeyError:
            # Unknown functions: undefined (any-arity) ones sort first.
            i = 0 if isinstance(cls.nargs, Naturals0) else 10000

        return 4, i, name

    @property
    def is_commutative(self):
        """
        Returns whether the function is commutative.
        """
        if all(getattr(t, 'is_commutative') for t in self.args):
            return True
        else:
            return False

    def _eval_evalf(self, prec):
        """Evaluate numerically via the mpmath function of the same name
        (or an ``_imp_`` implementation); returns None on failure."""
        # Lookup mpmath function based on name
        fname = self.func.__name__
        try:
            if not hasattr(mpmath, fname):
                from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
                fname = MPMATH_TRANSLATIONS[fname]
            func = getattr(mpmath, fname)
        except (AttributeError, KeyError):
            try:
                return Float(self._imp_(*self.args), prec)
            except (AttributeError, TypeError):
                return

        # Convert all args to mpf or mpc
        # Convert the arguments to *higher* precision than requested for the
        # final result.
        # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
        # we be more intelligent about it?
        try:
            args = [arg._to_mpmath(prec + 5) for arg in self.args]

            def bad(m):
                from mpmath import mpf, mpc
                # the precision of an mpf value is the last element
                # if that is 1 (and m[1] is not 1 which would indicate a
                # power of 2), then the eval failed; so check that none of
                # the arguments failed to compute to a finite precision.
                # Note: An mpc value has two parts, the re and imag tuple;
                # check each of those parts, too. Anything else is allowed to
                # pass
                if isinstance(m, mpf):
                    m = m._mpf_
                    return m[1] != 1 and m[-1] == 1
                elif isinstance(m, mpc):
                    m, n = m._mpc_
                    return m[1] != 1 and m[-1] == 1 and \
                        n[1] != 1 and n[-1] == 1
                else:
                    return False
            if any(bad(a) for a in args):
                raise ValueError  # one or more args failed to compute with significance
        except ValueError:
            return

        with mpmath.workprec(prec):
            v = func(*args)

        return Expr._from_mpmath(v, prec)

    def _eval_derivative(self, s):
        # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)  (chain rule over args)
        i = 0
        l = []
        for a in self.args:
            i += 1
            da = a.diff(s)
            if da is S.Zero:
                continue
            try:
                df = self.fdiff(i)
            except ArgumentIndexError:
                df = Function.fdiff(self, i)
            l.append(df * da)
        return Add(*l)

    def _eval_is_commutative(self):
        return fuzzy_and(a.is_commutative for a in self.args)

    def _eval_is_complex(self):
        return fuzzy_and(a.is_complex for a in self.args)

    def as_base_exp(self):
        """
        Returns the method as the 2-tuple (base, exponent).
        """
        return self, S.One

    def _eval_aseries(self, n, args0, x, logx):
        """
        Compute an asymptotic expansion around args0, in terms of self.args.
        This function is only used internally by _eval_nseries and should not
        be called directly; derived classes can overwrite this to implement
        asymptotic expansions.
        """
        from sympy.utilities.misc import filldedent
        raise PoleError(filldedent('''
            Asymptotic expansion of %s around %s is
            not implemented.''' % (type(self), args0)))

    def _eval_nseries(self, x, n, logx):
        """
        This function does compute series for multivariate functions,
        but the expansion is always in terms of *one* variable.

        Examples
        ========

        >>> from sympy import atan2
        >>> from sympy.abc import x, y
        >>> atan2(x, y).series(x, n=2)
        atan2(0, y) + x/y + O(x**2)
        >>> atan2(x, y).series(y, n=2)
        -y/x + atan2(x, 0) + O(y**2)

        This function also computes asymptotic expansions, if necessary
        and possible:

        >>> from sympy import loggamma
        >>> loggamma(1/x)._eval_nseries(x,0,None)
        -1/x - log(x)/x + log(x)/2 + O(1)

        """
        from sympy import Order
        from sympy.sets.sets import FiniteSet
        args = self.args
        args0 = [t.limit(x, 0) for t in args]
        if any(t.is_finite is False for t in args0):
            from sympy import oo, zoo, nan
            # XXX could use t.as_leading_term(x) here but it's a little
            # slower
            a = [t.compute_leading_term(x, logx=logx) for t in args]
            a0 = [t.limit(x, 0) for t in a]
            if any([t.has(oo, -oo, zoo, nan) for t in a0]):
                return self._eval_aseries(n, args0, x, logx)
            # Careful: the argument goes to oo, but only logarithmically so. We
            # are supposed to do a power series expansion "around the
            # logarithmic term". e.g.
            #      f(1+x+log(x))
            #     -> f(1+logx) + x*f'(1+logx) + O(x**2)
            # where 'logx' is given in the argument
            a = [t._eval_nseries(x, n, logx) for t in args]
            z = [r - r0 for (r, r0) in zip(a, a0)]
            p = [Dummy() for t in z]
            q = []
            v = None
            for ai, zi, pi in zip(a0, z, p):
                if zi.has(x):
                    if v is not None:
                        raise NotImplementedError
                    q.append(ai + pi)
                    v = pi
                else:
                    q.append(ai)
            e1 = self.func(*q)
            if v is None:
                return e1
            s = e1._eval_nseries(v, n, logx)
            o = s.getO()
            s = s.removeO()
            # NOTE(review): `zi` here is the loop variable left over from the
            # loop above (at most one zi depends on x, so it is the right one,
            # but this relies on the single-v NotImplementedError guard).
            s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
            return s
        if (self.func.nargs is S.Naturals0
                or (self.func.nargs == FiniteSet(1) and args0[0])
                or any(c > 1 for c in self.func.nargs)):
            e = self
            e1 = e.expand()
            if e == e1:
                #for example when e = sin(x+1) or e = sin(cos(x))
                #let's try the general algorithm
                term = e.subs(x, S.Zero)
                if term.is_finite is False or term is S.NaN:
                    raise PoleError("Cannot expand %s around 0" % (self))
                series = term
                fact = S.One
                _x = Dummy('x')
                e = e.subs(x, _x)
                # Build the Taylor series term by term via repeated
                # differentiation at 0.
                for i in range(n - 1):
                    i += 1
                    fact *= Rational(i)
                    e = e.diff(_x)
                    subs = e.subs(_x, S.Zero)
                    if subs is S.NaN:
                        # try to evaluate a limit if we have to
                        subs = e.limit(_x, S.Zero)
                    if subs.is_finite is False:
                        raise PoleError("Cannot expand %s around 0" % (self))
                    term = subs*(x**i)/fact
                    term = term.expand()
                    series += term
                return series + Order(x**n, x)
            return e1.nseries(x, n=n, logx=logx)
        arg = self.args[0]
        l = []
        g = None
        # try to predict a number of terms needed
        nterms = n + 2
        cf = Order(arg.as_leading_term(x), x).getn()
        if cf != 0:
            nterms = int(nterms / cf)
        for i in range(nterms):
            g = self.taylor_term(i, arg, g)
            g = g.nseries(x, n=n, logx=logx)
            l.append(g)
        return Add(*l) + Order(x**n, x)

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of the function.
        """
        if not (1 <= argindex <= len(self.args)):
            raise ArgumentIndexError(self, argindex)

        if not self.args[argindex - 1].is_Symbol:
            # See issue 4624 and issue 4719 and issue 5600
            arg_dummy = Dummy('xi_%i' % argindex)
            arg_dummy.dummy_index = hash(self.args[argindex - 1])
            return Subs(Derivative(
                self.subs(self.args[argindex - 1], arg_dummy),
                arg_dummy), arg_dummy, self.args[argindex - 1])
        return Derivative(self, self.args[argindex - 1], evaluate=False)

    def _eval_as_leading_term(self, x):
        """Stub that should be overridden by new Functions to return
        the first non-zero term in a series if ever an x-dependent
        argument whose leading term vanishes as x -> 0 might be encountered.
        See, for example, cos._eval_as_leading_term.
        """
        from sympy import Order
        args = [a.as_leading_term(x) for a in self.args]
        o = Order(1, x)
        if any(x in a.free_symbols and o.contains(a) for a in args):
            # Whereas x and any finite number are contained in O(1, x),
            # expressions like 1/x are not. If any arg simplified to a
            # vanishing expression as x -> 0 (like x or x**2, but not
            # 3, 1/x, etc...) then the _eval_as_leading_term is needed
            # to supply the first non-zero term of the series,
            #
            # e.g. expression    leading term
            #      ----------    ------------
            #      cos(1/x)      cos(1/x)
            #      cos(cos(x))   cos(1)
            #      cos(x)        1 <- _eval_as_leading_term needed
            #      sin(x)        x <- _eval_as_leading_term needed
            #
            raise NotImplementedError(
                '%s has no _eval_as_leading_term routine' % self.func)
        else:
            return self.func(*args)

    def _sage_(self):
        # Convert to the Sage function of the same name, recursively
        # converting the arguments.
        import sage.all as sage
        fname = self.func.__name__
        func = getattr(sage, fname)
        args = [arg._sage_() for arg in self.args]
        return func(*args)
class AppliedUndef(Function):
    """
    Base class for expressions resulting from the application of an undefined
    function.
    """

    def __new__(cls, *args, **options):
        args = list(map(sympify, args))
        obj = super(AppliedUndef, cls).__new__(cls, *args, **options)
        return obj

    def _eval_as_leading_term(self, x):
        # An undefined function is its own leading term.
        return self

    def _sage_(self):
        # Undefined functions become symbolic Sage functions of the same name.
        import sage.all as sage
        fname = str(self.func)
        args = [arg._sage_() for arg in self.args]
        func = sage.function(fname, *args)
        return func
class UndefinedFunction(FunctionClass):
    """
    The (meta)class of undefined functions.
    """
    def __new__(mcl, name, **kwargs):
        # Create a new class named `name` deriving from AppliedUndef.
        ret = BasicMeta.__new__(mcl, name, (AppliedUndef,), kwargs)
        ret.__module__ = None
        return ret

    def __instancecheck__(cls, instance):
        return cls in type(instance).__mro__

# Two undefined-function classes compare equal when they have the same
# class key (i.e. the same name), not only when they are the same object.
UndefinedFunction.__eq__ = lambda s, o: (isinstance(o, s.__class__) and
                                         (s.class_key() == o.class_key()))
class WildFunction(Function, AtomicExpr):
    """
    A WildFunction function matches any function (with its arguments).

    Examples
    ========

    >>> from sympy import WildFunction, Function, cos
    >>> from sympy.abc import x, y
    >>> F = WildFunction('F')
    >>> f = Function('f')
    >>> F.nargs
    Naturals0()
    >>> x.match(F)
    >>> F.match(F)
    {F_: F_}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> cos(x).match(F)
    {F_: cos(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a given number of arguments, set ``nargs`` to the
    desired value at instantiation:

    >>> F = WildFunction('F', nargs=2)
    >>> F.nargs
    {2}
    >>> f(x).match(F)
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a range of arguments, set ``nargs`` to a tuple
    containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
    then functions with 1 or 2 arguments will be matched.

    >>> F = WildFunction('F', nargs=(1, 2))
    >>> F.nargs
    {1, 2}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}
    >>> f(x, y, 1).match(F)

    """

    include = set()

    def __init__(cls, name, **assumptions):
        from sympy.sets.sets import Set, FiniteSet
        cls.name = name
        nargs = assumptions.pop('nargs', S.Naturals0)
        if not isinstance(nargs, Set):
            # Canonicalize nargs here.  See also FunctionClass.
            if is_sequence(nargs):
                nargs = tuple(ordered(set(nargs)))
            elif nargs is not None:
                nargs = (as_int(nargs),)
            nargs = FiniteSet(*nargs)
        cls.nargs = nargs

    def matches(self, expr, repl_dict={}, old=False):
        # Match any applied function whose argument count is in self.nargs.
        # The mutable default repl_dict is safe here: it is copied before
        # being mutated.
        if not isinstance(expr, (AppliedUndef, Function)):
            return None
        if len(expr.args) not in self.nargs:
            return None

        repl_dict = repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
class Derivative(Expr):
    """
    Carries out differentiation of the given expression with respect to symbols.

    expr must define ._eval_derivative(symbol) method that returns
    the differentiation result. This function only needs to consider the
    non-trivial case where expr contains symbol and it should call the diff()
    method internally (not _eval_derivative); Derivative should be the only
    one to call _eval_derivative.

    Simplification of high-order derivatives:

    Because there can be a significant amount of simplification that can be
    done when multiple differentiations are performed, results will be
    automatically simplified in a fairly conservative fashion unless the
    keyword ``simplify`` is set to False.

    >>> from sympy import sqrt, diff
    >>> from sympy.abc import x
    >>> e = sqrt((x + 1)**2 + x)
    >>> diff(e, x, 5, simplify=False).count_ops()
    136
    >>> diff(e, x, 5).count_ops()
    30

    Ordering of variables:

    If evaluate is set to True and the expression can not be evaluated, the
    list of differentiation symbols will be sorted, that is, the expression is
    assumed to have continuous derivatives up to the order asked. This sorting
    assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
    commute, but Symbol and non-Symbol derivatives don't commute with each
    other.

    Derivative wrt non-Symbols:

    This class also allows derivatives wrt non-Symbols that have _diff_wrt
    set to True, such as Function and Derivative. When a derivative wrt a non-
    Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
    while the differentiation is performed.

    Note that this may seem strange, that Derivative allows things like
    f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
    allowing this syntax is to make it easier to work with variational calculus
    (i.e., the Euler-Lagrange method). The best way to understand this is that
    the action of derivative with respect to a non-Symbol is defined by the
    above description: the object is substituted for a Symbol and the
    derivative is taken with respect to that. This action is only allowed for
    objects for which this can be done unambiguously, for example Function and
    Derivative objects. Note that this leads to what may appear to be
    mathematically inconsistent results. For example::

        >>> from sympy import cos, sin, sqrt
        >>> from sympy.abc import x
        >>> (2*cos(x)).diff(cos(x))
        2
        >>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
        0

    This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
    identically equal. However this is the wrong way to think of this. Think
    of it instead as if we have something like this::

        >>> from sympy.abc import c, s
        >>> def F(u):
        ...     return 2*u
        ...
        >>> def G(u):
        ...     return 2*sqrt(1 - u**2)
        ...
        >>> F(cos(x))
        2*cos(x)
        >>> G(sin(x))
        2*sqrt(-sin(x)**2 + 1)
        >>> F(c).diff(c)
        2
        >>> F(c).diff(c)
        2
        >>> G(s).diff(c)
        0
        >>> G(sin(x)).diff(cos(x))
        0

    Here, the Symbols c and s act just like the functions cos(x) and sin(x),
    respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at*
    c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s) *at*
    s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
    define the function first and evaluate it at the function, but we can
    actually unambiguously do this in reverse in SymPy, because
    expr.subs(Function, Symbol) is well-defined: just structurally replace the
    function everywhere it appears in the expression.

    This is the same notational convenience used in the Euler-Lagrange method
    when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant is
    that the expression in question is represented by some F(t, u, v) at u =
    f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means F(t, u,
    v).diff(u) at u = f(t).

    We do not allow derivatives to be taken with respect to expressions where this
    is not so well defined. For example, we do not allow expr.diff(x*y)
    because there are multiple ways of structurally defining where x*y appears
    in an expression, some of which may surprise the reader (for example, a
    very strict definition would have that (x*y*z).diff(x*y) == 0).

        >>> from sympy.abc import x, y, z
        >>> (x*y*z).diff(x*y)
        Traceback (most recent call last):
        ...
        ValueError: Can't differentiate wrt the variable: x*y, 1

    Note that this definition also fits in nicely with the definition of the
    chain rule. Note how the chain rule in SymPy is defined using unevaluated
    Subs objects::

        >>> from sympy import symbols, Function
        >>> f, g = symbols('f g', cls=Function)
        >>> f(2*g(x)).diff(x)
        2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1),
                                   (_xi_1,), (2*g(x),))
        >>> f(g(x)).diff(x)
        Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1),
                                 (_xi_1,), (g(x),))

    Finally, note that, to be consistent with variational calculus, and to
    ensure that the definition of substituting a Function for a Symbol in an
    expression is well-defined, derivatives of functions are assumed to not be
    related to the function. In other words, we have::

        >>> from sympy import diff
        >>> diff(f(x), x).diff(f(x))
        0

    The same is true for derivatives of different orders::

        >>> diff(f(x), x, 2).diff(diff(f(x), x, 1))
        0
        >>> diff(f(x), x, 1).diff(diff(f(x), x, 2))
        0

    Note, any class can allow derivatives to be taken with respect to itself.
    See the docstring of Expr._diff_wrt.

    Examples
    ========

    Some basic examples:

    >>> from sympy import Derivative, Symbol, Function
    >>> f = Function('f')
    >>> g = Function('g')
    >>> x = Symbol('x')
    >>> y = Symbol('y')

    >>> Derivative(x**2, x, evaluate=True)
    2*x
    >>> Derivative(Derivative(f(x,y), x), y)
    Derivative(f(x, y), x, y)
    >>> Derivative(f(x), x, 3)
    Derivative(f(x), x, x, x)
    >>> Derivative(f(x, y), y, x, evaluate=True)
    Derivative(f(x, y), x, y)

    Now some derivatives wrt functions:

    >>> Derivative(f(x)**2, f(x), evaluate=True)
    2*f(x)
    >>> Derivative(f(g(x)), x, evaluate=True)
    Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1),
                             (_xi_1,), (g(x),))
    """

    is_Derivative = True

    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Derivatives if it contains a function.

        Examples
        ========

        >>> from sympy import Function, Symbol, Derivative
        >>> f = Function('f')
        >>> x = Symbol('x')
        >>> Derivative(f(x),x)._diff_wrt
        True
        >>> Derivative(x**2,x)._diff_wrt
        False
        """
        if self.expr.is_Function:
            return True
        else:
            return False

    def __new__(cls, expr, *variables, **assumptions):
        expr = sympify(expr)

        # There are no variables, we differentiate wrt all of the free symbols
        # in expr.
        if not variables:
            variables = expr.free_symbols
            if len(variables) != 1:
                from sympy.utilities.misc import filldedent
                raise ValueError(filldedent('''
                    Since there is more than one variable in the
                    expression, the variable(s) of differentiation
                    must be supplied to differentiate %s''' % expr))

        # Standardize the variables by sympifying them and appending a
        # count of 1 if there is only one variable: diff(e,x)->diff(e,x,1).
        variables = list(sympify(variables))
        if not variables[-1].is_Integer or len(variables) == 1:
            variables.append(S.One)

        # Split the list of variables into a list of the variables we are diff
        # wrt, where each element of the list has the form (s, count) where
        # s is the entity to diff wrt and count is the order of the
        # derivative.
        variable_count = []
        all_zero = True
        i = 0
        while i < len(variables) - 1:  # process up to final Integer
            v, count = variables[i: i + 2]
            iwas = i
            if v._diff_wrt:
                # We need to test the more specific case of count being an
                # Integer first.
                if count.is_Integer:
                    count = int(count)
                    i += 2
                elif count._diff_wrt:
                    # Two diff-wrt entities in a row means an implicit count
                    # of 1 for the first one.
                    count = 1
                    i += 1

            if i == iwas:  # didn't get an update because of bad input
                from sympy.utilities.misc import filldedent
                last_digit = int(str(count)[-1])
                ordinal = 'st' if last_digit == 1 else 'nd' if last_digit == 2 else 'rd' if last_digit == 3 else 'th'
                raise ValueError(filldedent('''
                Can\'t calculate %s%s derivative wrt %s.''' % (count, ordinal, v)))

            if all_zero and not count == 0:
                all_zero = False

            if count:
                variable_count.append((v, count))

        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if all_zero:
            return expr

        # Pop evaluate because it is not really an assumption and we will need
        # to track it carefully below.
        evaluate = assumptions.pop('evaluate', False)

        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate:
            symbol_set = set(sc[0] for sc in variable_count if sc[0].is_Symbol)
            if symbol_set.difference(expr.free_symbols):
                return S.Zero

        # We make a generator so as to only generate a variable when necessary.
        # If a high order of derivative is requested and the expr becomes 0
        # after a few differentiations, then we won't need the other variables.
        variablegen = (v for v, count in variable_count for i in range(count))

        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
                (not isinstance(expr, Derivative))):
            variables = list(variablegen)
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too aggressive if evaluate
            # is False, so we don't do it in that case.
            if evaluate:
                #TODO: check if assumption of discontinuous derivatives exist
                variables = cls._sort_variables(variables)
            # Here we *don't* need to reinject evaluate into assumptions
            # because we are done with it and it is not an assumption that
            # Expr knows about.
            obj = Expr.__new__(cls, expr, *variables, **assumptions)
            return obj

        # Compute the derivative now by repeatedly calling the
        # _eval_derivative method of expr for each variable. When this method
        # returns None, the derivative couldn't be computed wrt that variable
        # and we save the variable for later.
        unhandled_variables = []

        # Once we encounter a non_symbol that is unhandled, we stop taking
        # derivatives entirely. This is because derivatives wrt functions
        # don't commute with derivatives wrt symbols and we can't safely
        # continue.
        unhandled_non_symbol = False
        nderivs = 0  # how many derivatives were performed
        for v in variablegen:
            is_symbol = v.is_Symbol

            if unhandled_non_symbol:
                obj = None
            else:
                if not is_symbol:
                    # Temporarily replace the non-symbol with a Dummy so the
                    # core differentiation machinery can treat it as a Symbol.
                    # NOTE(review): ``i`` here is left over from the
                    # canonicalization loop above (the generator's loop
                    # variable is scoped to the genexp), so all replacement
                    # dummies share the same name; they are distinguished by
                    # the ``dummy_index`` set on the next line.
                    new_v = Dummy('xi_%i' % i)
                    new_v.dummy_index = hash(v)
                    expr = expr.subs(v, new_v)
                    old_v = v
                    v = new_v
                obj = expr._eval_derivative(v)
                nderivs += 1
                if not is_symbol:
                    if obj is not None:
                        # Undo the Dummy substitution in the result.
                        obj = obj.subs(v, old_v)
                    v = old_v

            if obj is None:
                unhandled_variables.append(v)
                if not is_symbol:
                    unhandled_non_symbol = True
            elif obj is S.Zero:
                return S.Zero
            else:
                expr = obj

        if unhandled_variables:
            unhandled_variables = cls._sort_variables(unhandled_variables)
            expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
        else:
            # We got a Derivative at the end of it all, and we rebuild it by
            # sorting its variables.
            if isinstance(expr, Derivative):
                expr = cls(
                    expr.args[0], *cls._sort_variables(expr.args[1:])
                )

        if nderivs > 1 and assumptions.get('simplify', True):
            from sympy.core.exprtools import factor_terms
            from sympy.simplify.simplify import signsimp
            expr = factor_terms(signsimp(expr))
        return expr

    @classmethod
    def _sort_variables(cls, vars):
        """Sort variables, but disallow sorting of non-symbols.

        When taking derivatives, the following rules usually hold:

        * Derivative wrt different symbols commute.
        * Derivative wrt different non-symbols commute.
        * Derivatives wrt symbols and non-symbols don't commute.

        Examples
        ========

        >>> from sympy import Derivative, Function, symbols
        >>> vsort = Derivative._sort_variables
        >>> x, y, z = symbols('x y z')
        >>> f, g, h = symbols('f g h', cls=Function)

        >>> vsort((x,y,z))
        [x, y, z]

        >>> vsort((h(x),g(x),f(x)))
        [f(x), g(x), h(x)]

        >>> vsort((z,y,x,h(x),g(x),f(x)))
        [x, y, z, f(x), g(x), h(x)]

        >>> vsort((x,f(x),y,f(y)))
        [x, f(x), y, f(y)]

        >>> vsort((y,x,g(x),f(x),z,h(x),y,x))
        [x, y, f(x), g(x), z, h(x), x, y]

        >>> vsort((z,y,f(x),x,f(x),g(x)))
        [y, z, f(x), x, f(x), g(x)]

        >>> vsort((z,y,f(x),x,f(x),g(x),z,z,y,x))
        [y, z, f(x), x, f(x), g(x), x, y, z, z]
        """
        # Sort maximal runs of symbols and maximal runs of non-symbols
        # independently; a change of kind flushes the pending run so that
        # symbol and non-symbol derivatives are never reordered across
        # each other.
        sorted_vars = []
        symbol_part = []
        non_symbol_part = []
        for v in vars:
            if not v.is_Symbol:
                if len(symbol_part) > 0:
                    sorted_vars.extend(sorted(symbol_part,
                                              key=default_sort_key))
                    symbol_part = []
                non_symbol_part.append(v)
            else:
                if len(non_symbol_part) > 0:
                    sorted_vars.extend(sorted(non_symbol_part,
                                              key=default_sort_key))
                    non_symbol_part = []
                symbol_part.append(v)
        if len(non_symbol_part) > 0:
            sorted_vars.extend(sorted(non_symbol_part,
                                      key=default_sort_key))
        if len(symbol_part) > 0:
            sorted_vars.extend(sorted(symbol_part,
                                      key=default_sort_key))
        return sorted_vars

    def _eval_is_commutative(self):
        # A derivative is commutative iff the differentiated expr is.
        return self.expr.is_commutative

    def _eval_derivative(self, v):
        # If the variable v we are diff wrt is not in self.variables, we
        # assume that we might be able to take the derivative.
        if v not in self.variables:
            obj = self.expr.diff(v)
            if obj is S.Zero:
                return S.Zero
            if isinstance(obj, Derivative):
                return obj.func(obj.expr, *(self.variables + obj.variables))
            # The derivative wrt s could have simplified things such that the
            # derivative wrt things in self.variables can now be done. Thus,
            # we set evaluate=True to see if there are any other derivatives
            # that can be done. The most common case is when obj is a simple
            # number so that the derivative wrt anything else will vanish.
            return self.func(obj, *self.variables, evaluate=True)
        # In this case v was in self.variables so the derivative wrt v has
        # already been attempted and was not computed, either because it
        # couldn't be or evaluate=False originally.
        return self.func(self.expr, *(self.variables + (v, )), evaluate=False)

    def doit(self, **hints):
        """Evaluate the derivative (recursively, unless ``deep=False``)."""
        expr = self.expr
        if hints.get('deep', True):
            expr = expr.doit(**hints)
        hints['evaluate'] = True
        return self.func(expr, *self.variables, **hints)

    @_sympifyit('z0', NotImplementedError)
    def doit_numerically(self, z0):
        """
        Evaluate the derivative at z numerically.

        When we can represent derivatives at a point, this should be folded
        into the normal evalf. For now, we need a special method.
        """
        import mpmath
        from sympy.core.expr import Expr
        if len(self.free_symbols) != 1 or len(self.variables) != 1:
            raise NotImplementedError('partials and higher order derivatives')
        z = list(self.free_symbols)[0]

        def eval(x):
            # Numeric evaluation of self.expr at the mpmath value x, done at
            # the current working precision of mpmath.
            f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
            f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
            return f0._to_mpmath(mpmath.mp.prec)

        return Expr._from_mpmath(mpmath.diff(eval,
                                             z0._to_mpmath(mpmath.mp.prec)),
                                 mpmath.mp.prec)

    @property
    def expr(self):
        """The expression being differentiated."""
        return self._args[0]

    @property
    def variables(self):
        """The differentiation variables (repeated for higher orders)."""
        return self._args[1:]

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    def _eval_subs(self, old, new):
        if old in self.variables and not new.is_Symbol:
            # issue 4719: substituting a non-symbol for a differentiation
            # variable must be deferred via an unevaluated Subs.
            return Subs(self, old, new)
        # If both are Derivatives with the same expr, check if old is
        # equivalent to self or if old is a subderivative of self.
        if old.is_Derivative and old.expr == self.args[0]:
            # Check if canonical order of variables is equal.
            old_vars = Derivative._sort_variables(old.variables)
            self_vars = Derivative._sort_variables(self.args[1:])
            if old_vars == self_vars:
                return new

            # Check if old is a subderivative of self.
            if len(old_vars) < len(self_vars):
                self_vars_front = []
                match = True
                while old_vars and self_vars and match:
                    if old_vars[0] == self_vars[0]:
                        old_vars.pop(0)
                        self_vars.pop(0)
                    else:
                        # If self_v does not match old_v, we need to check if
                        # the types are the same (symbol vs non-symbol). If
                        # they are, we can continue checking self_vars for a
                        # match.
                        if old_vars[0].is_Symbol != self_vars[0].is_Symbol:
                            match = False
                        else:
                            self_vars_front.append(self_vars.pop(0))
                if match:
                    variables = self_vars_front + self_vars
                    return Derivative(new, *variables)
        return Derivative(*(x._subs(old, new) for x in self.args))

    def _eval_lseries(self, x, logx):
        # Differentiate the series term by term.
        dx = self.args[1:]
        for term in self.args[0].lseries(x, logx=logx):
            yield self.func(term, *dx)

    def _eval_nseries(self, x, n, logx):
        arg = self.args[0].nseries(x, n=n, logx=logx)
        o = arg.getO()
        dx = self.args[1:]
        rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
        if o:
            rv.append(o/x)
        return Add(*rv)

    def _eval_as_leading_term(self, x):
        return self.args[0].as_leading_term(x)

    def _sage_(self):
        import sage.all as sage
        args = [arg._sage_() for arg in self.args]
        return sage.derivative(*args)
class Lambda(Expr):
    """
    Lambda(x, expr) represents a lambda function similar to Python's
    'lambda x: expr'. A function of several variables is written as
    Lambda((x, y, ...), expr).

    A simple example:

    >>> from sympy import Lambda
    >>> from sympy.abc import x
    >>> f = Lambda(x, x**2)
    >>> f(4)
    16

    For multivariate functions, use:

    >>> from sympy.abc import y, z, t
    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73

    A handy shortcut for lots of arguments:

    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z
    """
    is_Function = True

    def __new__(cls, variables, expr):
        from sympy.sets.sets import FiniteSet
        # Validate that every bound variable is a Symbol.
        try:
            for v in variables if iterable(variables) else [variables]:
                if not v.is_Symbol:
                    raise TypeError('variable is not a symbol: %s' % v)
        except (AssertionError, AttributeError):
            raise ValueError('variable is not a Symbol: %s' % v)
        try:
            variables = Tuple(*variables)
        except TypeError:
            variables = Tuple(variables)
        # Lambda(x, x) canonicalizes to the identity function singleton.
        if len(variables) == 1 and variables[0] == expr:
            return S.IdentityFunction
        obj = Expr.__new__(cls, Tuple(*variables), S(expr))
        obj.nargs = FiniteSet(len(variables))
        return obj

    @property
    def variables(self):
        """The variables used in the internal representation of the function"""
        return self._args[0]

    @property
    def expr(self):
        """The return value of the function"""
        return self._args[1]

    @property
    def free_symbols(self):
        # The bound variables are not free.
        return self.expr.free_symbols.difference(self.variables)

    def __call__(self, *args):
        n = len(args)
        if n not in self.nargs:  # Lambda only ever has 1 value in nargs
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.
            ## XXX does this apply to Lambda? If not, remove this comment.
            expected = list(self.nargs)[0]
            temp = ('%(name)s takes exactly %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': self,
                'args': expected,
                'plural': 's'*(expected != 1),
                'given': n})
        return self.expr.xreplace(dict(zip(self.variables, args)))

    def __eq__(self, other):
        if not isinstance(other, Lambda) or self.nargs != other.nargs:
            return False
        # Compare the bodies after renaming other's variables to self's.
        renaming = dict(zip(other.args[0], self.args[0]))
        return self.args[1] == other.args[1].xreplace(renaming)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return super(Lambda, self).__hash__()

    def _hashable_content(self):
        return (self.expr.xreplace(self.canonical_variables),)

    @property
    def is_identity(self):
        """Return ``True`` if this ``Lambda`` is an identity function. """
        return self.args[0] == self.args[1] if len(self.args) == 2 else None
class Subs(Expr):
    """
    Represents unevaluated substitutions of an expression.

    ``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
    list of distinct variables and a point or list of evaluation points
    corresponding to those variables.

    ``Subs`` objects are generally useful to represent unevaluated derivatives
    calculated at a point.

    The variables may be expressions, but they are subjected to the limitations
    of subs(), so it is usually a good practice to use only symbols for
    variables, since in that case there can be no ambiguity.

    There's no automatic expansion - use the method .doit() to effect all
    possible substitutions of the object and also of objects inside the
    expression.

    When evaluating derivatives at a point that is not a symbol, a Subs object
    is returned. One is also able to calculate derivatives of Subs objects - in
    this case the expression is always expanded (for the unevaluated form, use
    Derivative()).

    A simple example:

    >>> from sympy import Subs, Function, sin
    >>> from sympy.abc import x, y, z
    >>> f = Function('f')
    >>> e = Subs(f(x).diff(x), x, y)
    >>> e.subs(y, 0)
    Subs(Derivative(f(x), x), (x,), (0,))
    >>> e.subs(f, sin).doit()
    cos(y)

    An example with several variables:

    >>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
    Subs(z + f(x)*sin(y), (x, y), (0, 1))
    >>> _.doit()
    z + f(0)*sin(1)
    """
    def __new__(cls, expr, variables, point, **assumptions):
        from sympy import Symbol
        # Normalize a single variable/point to one-element sequences.
        if not is_sequence(variables, Tuple):
            variables = [variables]
        variables = list(sympify(variables))

        if list(uniq(variables)) != variables:
            repeated = [ v for v in set(variables) if variables.count(v) > 1 ]
            raise ValueError('cannot substitute expressions %s more than '
                             'once.' % repeated)

        point = Tuple(*(point if is_sequence(point, Tuple) else [point]))

        if len(point) != len(variables):
            raise ValueError('Number of point values must be the same as '
                             'the number of variables.')

        expr = sympify(expr)

        # use symbols with names equal to the point value (with prepended _)
        # to give a variable-independent expression
        pre = "_"
        pts = sorted(set(point), key=default_sort_key)
        from sympy.printing import StrPrinter

        class CustomStrPrinter(StrPrinter):
            # Print Dummies with their index so equal-looking Dummies get
            # distinct canonical names.
            def _print_Dummy(self, expr):
                return str(expr) + str(expr.dummy_index)

        def mystr(expr, **settings):
            p = CustomStrPrinter(settings)
            return p.doprint(expr)

        while 1:
            s_pts = dict([(p, Symbol(pre + mystr(p))) for p in pts])
            reps = [(v, s_pts[p])
                    for v, p in zip(variables, point)]
            # if any underscore-prepended symbol is already a free symbol
            # and is a variable with a different point value, then there
            # is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
            # because the new symbol that would be created is _1 but _1
            # is already mapped to 0 so __0 and __1 are used for the new
            # symbols
            if any(r in expr.free_symbols and
                   r in variables and
                   Symbol(pre + mystr(point[variables.index(r)])) != r
                   for _, r in reps):
                # Clash detected: lengthen the prefix and retry.
                pre += "_"
                continue
            break

        obj = Expr.__new__(cls, expr, Tuple(*variables), point)
        # _expr is the canonical, variable-independent form used for
        # comparison and hashing.
        obj._expr = expr.subs(reps)
        return obj

    def _eval_is_commutative(self):
        # Commutativity is inherited from the underlying expression.
        return self.expr.is_commutative

    def doit(self):
        """Perform the stored substitutions (after evaluating ``expr``)."""
        return self.expr.doit().subs(list(zip(self.variables, self.point)))

    def evalf(self, prec=None, **options):
        return self.doit().evalf(prec, **options)

    n = evalf  # alias, mirroring Expr.n

    @property
    def variables(self):
        """The variables to be evaluated"""
        return self._args[1]

    @property
    def expr(self):
        """The expression on which the substitution operates"""
        return self._args[0]

    @property
    def point(self):
        """The values for which the variables are to be substituted"""
        return self._args[2]

    @property
    def free_symbols(self):
        # The substituted variables are bound; the point contributes its own
        # free symbols.
        return (self.expr.free_symbols - set(self.variables) |
                set(self.point.free_symbols))

    def __eq__(self, other):
        if not isinstance(other, Subs):
            return False
        # Compare the canonical variable-independent forms, so equivalent
        # Subs with differently-named variables compare equal.
        return self._expr == other._expr

    def __ne__(self, other):
        return not(self == other)

    def __hash__(self):
        return super(Subs, self).__hash__()

    def _hashable_content(self):
        return (self._expr.xreplace(self.canonical_variables),)

    def _eval_subs(self, old, new):
        # Substituting for a bound variable is a no-op (its value is fixed
        # by ``point``); returning None otherwise defers to the default
        # substitution machinery.
        if old in self.variables:
            return self

    def _eval_derivative(self, s):
        if s not in self.free_symbols:
            return S.Zero
        # Chain rule: d/ds Subs(e, v, p) = Subs(de/ds, v, p)
        #             + sum_i dp_i/ds * Subs(de/dv_i, v, p)
        return self.func(self.expr.diff(s), self.variables, self.point).doit() \
            + Add(*[ Subs(point.diff(s) * self.expr.diff(arg),
                    self.variables, self.point).doit() for arg,
                    point in zip(self.variables, self.point) ])
def diff(f, *symbols, **kwargs):
    """
    Differentiate f with respect to symbols.

    This is just a wrapper to unify .diff() and the Derivative class; its
    interface is similar to that of integrate(). You can use the same
    shortcuts for multiple variables as with Derivative. For example,
    diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
    of f(x).

    You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if there are 0 symbols (such as diff(f(x), x, 0), then the result will
    be the function (the zeroth derivative), even if evaluate=False.

    Examples
    ========

    >>> from sympy import sin, cos, Function, diff
    >>> from sympy.abc import x, y
    >>> f = Function('f')

    >>> diff(sin(x), x)
    cos(x)
    >>> diff(f(x), x, x, x)
    Derivative(f(x), x, x, x)
    >>> diff(f(x), x, 3)
    Derivative(f(x), x, x, x)
    >>> diff(sin(x)*cos(y), x, 2, y, 2)
    sin(x)*cos(y)

    >>> type(diff(sin(x), x))
    cos
    >>> type(diff(sin(x), x, evaluate=False))
    <class 'sympy.core.function.Derivative'>
    >>> type(diff(sin(x), x, 0))
    sin
    >>> type(diff(sin(x), x, 0, evaluate=False))
    sin

    >>> diff(sin(x))
    cos(x)
    >>> diff(sin(x*y))
    Traceback (most recent call last):
    ...
    ValueError: specify differentiation variables to differentiate sin(x*y)

    Note that ``diff(sin(x))`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.

    References
    ==========

    http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html

    See Also
    ========

    Derivative
    sympy.geometry.util.idiff: computes the derivative implicitly
    """
    # Differentiation is evaluated by default, matching .diff().
    if 'evaluate' not in kwargs:
        kwargs['evaluate'] = True
    # Give the object a chance to supply its own differentiation hook; any
    # AttributeError (including from inside the hook) means "no hook" and we
    # fall back to the generic Derivative constructor.
    try:
        return f._eval_diff(*symbols, **kwargs)
    except AttributeError:
        pass
    return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp`` The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial`, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... '''
... Doubles the args of MyClass.
...
... If there more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, hyperexpand
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_issue_8247_8354 in test_arit)
if expr is None:
return
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
    """
    Wrapper around expand that only uses the mul hint. See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_mul, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
    x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
    """
    # Enable only the ``mul`` hint; every other expansion hint is disabled.
    only_mul = dict(mul=True, log=False, multinomial=False,
                    power_base=False, power_exp=False, basic=False)
    return sympify(expr).expand(deep=deep, **only_mul)
def expand_multinomial(expr, deep=True):
    """
    Wrapper around expand that only uses the multinomial hint. See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_multinomial, exp
    >>> x, y = symbols('x y', positive=True)
    >>> expand_multinomial((x + exp(x + 1))**2)
    x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
    """
    # Enable only the ``multinomial`` hint; all other hints are disabled.
    only_multinomial = dict(multinomial=True, mul=False, log=False,
                            power_base=False, power_exp=False, basic=False)
    return sympify(expr).expand(deep=deep, **only_multinomial)
def expand_log(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the log hint. See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_log, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
    (x + y)*(log(x) + 2*log(y))*exp(x + y)
    """
    # Enable only the ``log`` hint, forwarding ``force`` so callers can
    # bypass positivity assumptions; every other hint is disabled.
    only_log = dict(log=True, mul=False, multinomial=False,
                    power_base=False, power_exp=False, basic=False)
    return sympify(expr).expand(deep=deep, force=force, **only_log)
def expand_func(expr, deep=True):
    """
    Wrapper around expand that only uses the func hint. See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_func, gamma
    >>> from sympy.abc import x
    >>> expand_func(gamma(x + 2))
    x*(x + 1)*gamma(x)
    """
    # Enable only the ``func`` hint; all standard hints are disabled.
    only_func = dict(func=True, mul=False, log=False, multinomial=False,
                     power_base=False, power_exp=False, basic=False)
    return sympify(expr).expand(deep=deep, **only_func)
def expand_trig(expr, deep=True):
    """
    Wrapper around expand that only uses the trig hint. See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_trig, sin
    >>> from sympy.abc import x, y
    >>> expand_trig(sin(x+y)*(x+y))
    (x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
    """
    # Enable only the ``trig`` hint; all standard hints are disabled.
    only_trig = dict(trig=True, mul=False, log=False, multinomial=False,
                     power_base=False, power_exp=False, basic=False)
    return sympify(expr).expand(deep=deep, **only_trig)
def expand_complex(expr, deep=True):
    """
    Wrapper around expand that only uses the complex hint. See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_complex, exp, sqrt, I
    >>> from sympy.abc import z
    >>> expand_complex(exp(z))
    I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
    >>> expand_complex(sqrt(I))
    sqrt(2)/2 + sqrt(2)*I/2

    See Also
    ========

    Expr.as_real_imag
    """
    # Enable only the ``complex`` hint; all standard hints are disabled.
    only_complex = dict(complex=True, mul=False, log=False, multinomial=False,
                        power_base=False, power_exp=False, basic=False)
    return sympify(expr).expand(deep=deep, **only_complex)
def expand_power_base(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the power_base hint.

    See the expand docstring for more information.

    A wrapper to expand(power_base=True) which separates a power with a base
    that is a Mul into a product of powers, without performing any other
    expansions, provided that assumptions about the power's base and exponent
    allow.

    deep=False (default is True) will only apply to the top-level expression.

    force=True (default is False) will cause the expansion to ignore
    assumptions about the base and exponent. When False, the expansion will
    only happen if the base is non-negative or the exponent is an integer.

    >>> from sympy.abc import x, y, z
    >>> from sympy import expand_power_base, sin, cos, exp

    >>> (x*y)**2
    x**2*y**2

    >>> (2*x)**y
    (2*x)**y
    >>> expand_power_base(_)
    2**y*x**y

    >>> expand_power_base((x*y)**z)
    (x*y)**z
    >>> expand_power_base((x*y)**z, force=True)
    x**z*y**z
    >>> expand_power_base(sin((x*y)**z), deep=False)
    sin((x*y)**z)
    >>> expand_power_base(sin((x*y)**z), force=True)
    sin(x**z*y**z)

    >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
    2**y*sin(x)**y + 2**y*cos(x)**y

    >>> expand_power_base((2*exp(y))**x)
    2**x*exp(y)**x

    >>> expand_power_base((2*cos(x))**y)
    2**y*cos(x)**y

    Notice that sums are left untouched. If this is not the desired behavior,
    apply full ``expand()`` to the expression:

    >>> expand_power_base(((x+y)*z)**2)
    z**2*(x + y)**2
    >>> (((x+y)*z)**2).expand()
    x**2*z**2 + 2*x*y*z**2 + y**2*z**2

    >>> expand_power_base((2*y)**(1+z))
    2**(z + 1)*y**(z + 1)
    >>> ((2*y)**(1+z)).expand()
    2*2**z*y*y**z
    """
    # Enable only the ``power_base`` hint, forwarding ``force``; every
    # other expansion hint is disabled.
    only_power_base = dict(power_base=True, power_exp=False, mul=False,
                           log=False, multinomial=False, basic=False)
    return sympify(expr).expand(deep=deep, force=force, **only_power_base)
def expand_power_exp(expr, deep=True):
    """
    Wrapper around expand that only uses the power_exp hint.

    See the expand docstring for more information.

    Examples
    ========

    >>> from sympy import expand_power_exp
    >>> from sympy.abc import x, y
    >>> expand_power_exp(x**(y + 2))
    x**2*x**y
    """
    # Enable only the ``power_exp`` hint; all other hints (including the
    # ``complex`` meta-hint) are explicitly disabled.
    only_power_exp = dict(power_exp=True, power_base=False, mul=False,
                          log=False, multinomial=False, basic=False,
                          complex=False)
    return sympify(expr).expand(deep=deep, **only_power_exp)
def count_ops(expr, visual=False):
    """
    Return a representation (integer or expression) of the operations in expr.
    If ``visual`` is ``False`` (default) then the sum of the coefficients of the
    visual expression will be returned.
    If ``visual`` is ``True`` then the number of each type of operation is shown
    with the core class types (or their virtual equivalent) multiplied by the
    number of times they occur.
    If expr is an iterable, the sum of the op counts of the
    items will be returned.
    Examples
    ========
    >>> from sympy.abc import a, b, x, y
    >>> from sympy import sin, count_ops
    Although there isn't a SUB object, minus signs are interpreted as
    either negations or subtractions:
    >>> (x - y).count_ops(visual=True)
    SUB
    >>> (-x).count_ops(visual=True)
    NEG
    Here, there are two Adds and a Pow:
    >>> (1 + a + b**2).count_ops(visual=True)
    2*ADD + POW
    In the following, an Add, Mul, Pow and two functions:
    >>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
    ADD + MUL + POW + 2*SIN
    for a total of 5:
    >>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
    5
    Note that "what you type" is not always what you get. The expression
    1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
    than two DIVs:
    >>> (1/x/y).count_ops(visual=True)
    DIV + MUL
    The visual option can be used to demonstrate the difference in
    operations for expressions in different forms. Here, the Horner
    representation is compared with the expanded form of a polynomial:
    >>> eq=x*(1 + x*(2 + x*(3 + x)))
    >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
    -MUL + 3*POW
    The count_ops function also handles iterables:
    >>> count_ops([x, sin(x), None, True, x + 2], visual=False)
    2
    >>> count_ops([x, sin(x), None, True, x + 2], visual=True)
    ADD + SIN
    >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
    2*ADD + SIN
    """
    from sympy import Integral, Symbol
    from sympy.simplify.radsimp import fraction
    from sympy.logic.boolalg import BooleanFunction
    expr = sympify(expr)
    if isinstance(expr, Expr):
        # Iterative (stack-based) walk over the expression tree; each visited
        # node contributes symbolic "op" tokens (NEG, DIV, SUB, ADD, MUL, ...)
        # that are totalled at the end.
        ops = []
        args = [expr]
        NEG = Symbol('NEG')
        DIV = Symbol('DIV')
        SUB = Symbol('SUB')
        ADD = Symbol('ADD')
        while args:
            a = args.pop()
            # strings can appear as args of some nodes; they carry no ops
            if isinstance(a, str):
                continue
            if a.is_Rational:
                #-1/3 = NEG + DIV
                if a is not S.One:
                    if a.p < 0:
                        ops.append(NEG)
                    if a.q != 1:
                        ops.append(DIV)
                    continue
            elif a.is_Mul:
                # Factor out a leading minus sign before splitting numerator
                # and denominator so it is counted as a single NEG.
                if _coeff_isneg(a):
                    ops.append(NEG)
                    if a.args[0] is S.NegativeOne:
                        a = a.as_two_terms()[1]
                    else:
                        a = -a
                n, d = fraction(a)
                if n.is_Integer:
                    ops.append(DIV)
                    if n < 0:
                        ops.append(NEG)
                    args.append(d)
                    continue  # won't be -Mul but could be Add
                elif d is not S.One:
                    if not d.is_Integer:
                        args.append(d)
                    ops.append(DIV)
                    args.append(n)
                    continue  # could be -Mul
            elif a.is_Add:
                # A negative term after the first counts as a SUB instead of
                # an ADD; a leading negative term is patched up below.
                aargs = list(a.args)
                negs = 0
                for i, ai in enumerate(aargs):
                    if _coeff_isneg(ai):
                        negs += 1
                        args.append(-ai)
                        if i > 0:
                            ops.append(SUB)
                    else:
                        args.append(ai)
                        if i > 0:
                            ops.append(ADD)
                if negs == len(aargs):  # -x - y = NEG + SUB
                    ops.append(NEG)
                elif _coeff_isneg(aargs[0]):  # -x + y = SUB, but already recorded ADD
                    ops.append(SUB - ADD)
                continue
            if a.is_Pow and a.exp is S.NegativeOne:
                # x**-1 is displayed as 1/x, so count it as a DIV
                ops.append(DIV)
                args.append(a.base)  # won't be -Mul but could be Add
                continue
            if (a.is_Mul or
                a.is_Pow or
                a.is_Function or
                isinstance(a, Derivative) or
                isinstance(a, Integral)):
                o = Symbol(a.func.__name__.upper())
                # count the args
                if (a.is_Mul or isinstance(a, LatticeOp)):
                    # n-ary operations contribute (n - 1) binary ops
                    ops.append(o*(len(a.args) - 1))
                else:
                    ops.append(o)
            if not a.is_Symbol:
                args.extend(a.args)
    elif type(expr) is dict:
        # Count ops in both keys and values of a dict.
        ops = [count_ops(k, visual=visual) +
               count_ops(v, visual=visual) for k, v in expr.items()]
    elif iterable(expr):
        ops = [count_ops(i, visual=visual) for i in expr]
    elif isinstance(expr, BooleanFunction):
        ops = []
        for arg in expr.args:
            ops.append(count_ops(arg, visual=True))
        o = Symbol(expr.func.__name__.upper())
        ops.append(o)
    elif not isinstance(expr, Basic):
        # Non-Basic, non-iterable objects (e.g. None, bool) contribute nothing.
        ops = []
    else: # it's Basic not isinstance(expr, Expr):
        # NOTE(review): the TypeError below is unreachable — the previous
        # ``elif not isinstance(expr, Basic)`` branch already handled that case.
        if not isinstance(expr, Basic):
            raise TypeError("Invalid type of expr")
        else:
            ops = []
            args = [expr]
            while args:
                a = args.pop()
                if a.args:
                    o = Symbol(a.func.__name__.upper())
                    if a.is_Boolean:
                        ops.append(o*(len(a.args)-1))
                    else:
                        ops.append(o)
                    args.extend(a.args)
    if not ops:
        if visual:
            return S.Zero
        return 0
    ops = Add(*ops)
    if visual:
        return ops
    if ops.is_Number:
        return int(ops)
    # Sum the coefficients of the symbolic op tokens to get the total count.
    return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False):
    """Make all Rationals in expr Floats except those in exponents
    (unless the exponents flag is set to True).
    Examples
    ========
    >>> from sympy.core.function import nfloat
    >>> from sympy.abc import x, y
    >>> from sympy import cos, pi, sqrt
    >>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
    x**4 + 0.5*x + sqrt(y) + 1.5
    >>> nfloat(x**4 + sqrt(y), exponent=True)
    x**4.0 + y**0.5
    """
    from sympy.core.power import Pow
    from sympy.polys.rootoftools import RootOf
    # Containers (but not strings): recurse element-wise, preserving the
    # container type; dict-likes are rebuilt from (key, converted-value) pairs.
    if iterable(expr, exclude=string_types):
        if isinstance(expr, (dict, Dict)):
            return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
                     list(expr.items())])
        return type(expr)([nfloat(a, n, exponent) for a in expr])
    rv = sympify(expr)
    if rv.is_Number:
        return Float(rv, n)
    elif rv.is_number:
        # evalf doesn't always set the precision
        rv = rv.n(n)
        if rv.is_Number:
            rv = Float(rv.n(n), n)
        else:
            pass # pure_complex(rv) is likely True
        return rv
    # watch out for RootOf instances that don't like to have
    # their exponents replaced with Dummies and also sometimes have
    # problems with evaluating at low precision (issue 6393)
    rv = rv.xreplace(dict([(ro, ro.n(n)) for ro in rv.atoms(RootOf)]))
    if not exponent:
        # Temporarily hide every exponent behind a Dummy so that evalf
        # leaves exponents exact; the originals are restored after rv.n(n).
        reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
        rv = rv.xreplace(dict(reps))
    rv = rv.n(n)
    if not exponent:
        rv = rv.xreplace(dict([(d.exp, p.exp) for p, d in reps]))
    else:
        # Pow._eval_evalf special cases Integer exponents so if
        # exponent is suppose to be handled we have to do so here
        rv = rv.xreplace(Transform(
            lambda x: Pow(x.base, Float(x.exp, n)),
            lambda x: x.is_Pow and x.exp.is_Integer))
    # Finally, rebuild every Function node with nfloat-ed args so numeric
    # arguments inside functions are converted as well.
    return rv.xreplace(Transform(
        lambda x: x.func(*nfloat(x.args, n, exponent)),
        lambda x: isinstance(x, Function)))
from sympy.core.symbol import Dummy
| bsd-3-clause |
hlzz/dotfiles | graphics/VTK-7.0.0/Utilities/Maintenance/semanticDiffVersion.py | 1 | 21110 | #!/usr/bin/env python
"""
This script generates API changes between two different versions of VTK.
The script uses git and ctags to preset a list of classes added/removed, public
methods added/removed when going from one version to the other.
"""
# ctags generated by the command:
# ctags -R --sort=yes --c++-kinds=cf --fields=aiksz --language-force=C++
# --exclude=*.in --exclude=*.java --exclude=*.py --exclude=*.js --exclude=*.bmp
# -f vtk ~/Projects/vtk/src
import distutils.spawn
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
try:
import argparse
except ImportError:
from vtk.util import _argparse as argparse
# Matches one Exuberant Ctags line of the form
#   <tagname>\t<file>\t/^<declaration>$/;"\t<extension fields>
# capturing (1) tag name, (2) file path, (3) declaration text and
# (4) the tab-separated extension fields (kind, class, access, ...).
tagMatcherType = re.compile('^(.+)\t(\S+)\t/\^(.*)\$/;"\t(.*)\n')
class Tag:
    """One parsed ctags entry: symbol name, file, declaration and metadata."""

    def __init__(self):
        self._name = None      # tag (symbol) name
        self._access = None    # 'public'/'protected'/'private' when present
        self._kind = None      # ctags kind, e.g. 'c' (class) or 'f' (function)
        self._fyle = None      # source file, possibly suffixed with ':<line>'
        self._decl = None      # declaration text captured from the tag pattern
        self._class = None     # enclosing class/struct name, if any
        self._inherits = None  # base class list, if any

    def __repr__(self):
        # Fixed: the original read self.name / self.fyle, which are never
        # defined (only _name/_fyle are set), so repr() raised AttributeError.
        return "<Tag %s in file %s>" % (self._name, self._fyle)

    def parse(self, line):
        """
        Parse a CTags line and set myself from it.

        Raises KeyError if the line does not match the expected ctags format.
        """
        # example of CTags lines:
        # Type1: ARangeFunctor /home/sankhesh/Projects/vtk/src/Common/Core/Testing/Cxx/TestSMP.cxx /^class ARangeFunctor$/;" kind:c
        # Type2: APIDiagram /home/sankhesh/Projects/vtk/src/Charts/Core/Testing/Cxx/TestDiagram.cxx /^class APIDiagram : public vtkContextItem$/;" kind:c inherits:vtkContextItem
        # Type3: AbortFlagOff /home/sankhesh/Projects/vtk/src/Common/Core/vtkCommand.h /^ void AbortFlagOff()$/;" kind:f class:vtkCommand access:public
        # Type4: BooleanSet /home/sankhesh/Projects/vtk/src/Filters/General/vtkMultiThreshold.h /^ class BooleanSet : public Set {$/;" kind:c class:vtkMultiThreshold inherits:Set access:public
        tag = tagMatcherType.search(line)
        if not tag:
            # Call-style raise: valid in both Python 2 and 3 (the original
            # used the Python-2-only ``raise E, msg`` statement form).
            raise KeyError("line %s is not an expected ctags line" % line)
        self._name = tag.expand('\\1').strip()
        self._fyle = tag.expand('\\2').strip()
        self._decl = tag.expand('\\3').strip()
        exts = tag.expand('\\4').strip().split('\t')
        for ext in exts:
            ext_stripped = ext.strip()
            pre = ext_stripped.split(':')[0]
            suf = ext_stripped.split(':')[1]
            if pre == 'kind':
                self._kind = suf
                continue
            if pre == 'class' or pre == 'struct':
                # Keep everything after the first colon so nested scopes
                # (Outer::Inner) survive the split above.
                first_colon = ext_stripped.find(':')
                self._class = ext_stripped[(first_colon+1):]
                if "anon" in self._class:
                    # Strip ctags' synthetic "__anon..." scope prefix that is
                    # generated for anonymous namespaces/structs.
                    first_anon = self._class.find('__anon')
                    first_dcolon = self._class[first_anon:].find("::")
                    if first_dcolon > -1:
                        self._class = self._class[(first_anon+first_dcolon+2):]
                    else:
                        self._class = ""
                continue
            if pre == 'access':
                self._access = suf
                continue
            if pre == 'inherits':
                self._inherits = suf
                continue
            if pre == 'line':
                # Append the line number to the file path as "file:line".
                self._fyle = self._fyle + ":" + suf
class CTags():
    """Index of classes and public methods extracted from a ctags file."""

    def __init__(self):
        self._tagFile = ""       # path to tag file
        self._classes = []       # names of classes/structs seen
        self._pub_methods = []   # "class:method" strings for public functions
        self._class_file = {}    # class name -> defining file
        self._met_file = {}      # "class:method" -> defining file

    def set_tag_file(self, path):
        """
        Parse tags from the given file.
        """
        # Use a context manager so the tag file is always closed (the
        # original opened the file and never closed it).
        with open(path, "r") as handle:
            self._parse(handle.readlines())

    def _parse(self, lines):
        """Populate the class/method indices from raw ctags lines."""
        for line in lines:
            # Skip ctags metadata headers and interleaved warnings.
            if line.startswith('!_TAG_'):
                continue
            if line.startswith('ctags: Warning: ignoring null tag'):
                continue
            t = Tag()
            t.parse(line)
            # Now make append to list of classes and public methods
            if t._kind == 'c':
                self._classes.append(t._name)
                self._class_file[t._name] = t._fyle
            if t._kind == 'f' and t._access == 'public':
                met = t._class + ":" + t._name
                self._pub_methods.append(met)
                self._met_file[met] = t._fyle
class CompareVersions():
    """Drives the API comparison between two git revisions of a source tree.

    The constructor performs all the work: it clones the working tree into a
    temporary directory, scans the diff between the two revisions for newly
    added VTK_LEGACY deprecation markers, checks out each revision, builds a
    ctags file for each, and diffs the resulting class/method inventories.
    """
    def __init__(self, src_dir, version1, version2, tmp_dir):
        self.tmp_dir = os.path.abspath(tmp_dir)
        # Location of the temporary clone of the working tree.
        self.src_dir = self.tmp_dir + os.sep + os.path.basename(src_dir)
        self.src_work_dir = os.path.abspath(src_dir)
        self.version1 = version1
        self.version2 = version2
        # Public lists hold names; the underscored dicts map name -> file.
        self.classes_removed = []
        self._classes_removed = {}
        self.classes_added = []
        self._classes_added = {}
        self.pub_methods_removed = []
        self._pub_methods_removed = {}
        self.pub_methods_added = []
        self._pub_methods_added = {}
        self.tag_f1 = None
        self.tag_f2 = None
        self._git_HEAD = None
        self._git_diff = None
        self.dep_methods = {}
        # Get system executable paths for git and ctags
        self.svar = SystemVar()
        # Setup the temporary working tree
        self.setup_src_dir()
        # Get newly deprecated methods list
        self.get_deprecated_methods()
        # Compare the two versions provided
        self.generate_ctags()
        self.get_diff()
    def get_diff(self):
        """Compute added/removed classes and public methods via set differences
        of the two parsed tag files, and record the defining file of each."""
        print "Parsing tag file %s..." % self.tag_f1
        T1 = CTags()
        T1.set_tag_file(self.tag_f1)
        print "Parsing tag file %s...Done" % self.tag_f1
        print "Parsing tag file %s..." % self.tag_f2
        T2 = CTags()
        T2.set_tag_file(self.tag_f2)
        print "Parsing tag file %s...Done" % self.tag_f2
        self.classes_added = list(set(T2._classes) - set(T1._classes))
        self.classes_removed = list(set(T1._classes) - set(T2._classes))
        self.pub_methods_added = list(set(T2._pub_methods) - set(T1._pub_methods))
        self.pub_methods_removed = list(set(T1._pub_methods) - set(T2._pub_methods))
        # split(":")[0] drops the trailing line number from "file:line".
        for cls in self.classes_added:
            self._classes_added[cls] = T2._class_file[cls].split(":")[0]
        for cls in self.classes_removed:
            self._classes_removed[cls] = T1._class_file[cls].split(":")[0]
        for met in self.pub_methods_added:
            self._pub_methods_added[met] = T2._met_file[met].split(":")[0]
        for met in self.pub_methods_removed:
            self._pub_methods_removed[met] = T1._met_file[met].split(":")[0]
    def git_checkout_version(self, ver):
        """Check out *ver* in the temporary clone; exits the script if the
        clone has local modifications or the checkout fails."""
        git_cmd = self.svar.git_exe + ' --git-dir=' + self.src_dir + os.sep + '.git --work-tree=' +\
            self.src_dir
        # Make sure working tree does not have any local modifications
        git_modified = git_cmd + ' ls-files -m'
        git_m_proc = subprocess.Popen(git_modified.split(), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        git_m_proc.wait()
        git_m_proc_stdout = git_m_proc.stdout.read()
        if git_m_proc_stdout:
            print "Working tree has local modifications."
            print "Please commit or reset the following files:"
            print git_m_proc_stdout
            sys.exit(1)
        # Reset the working tree to version specified
        git_reset_cmd = git_cmd + ' checkout ' + ver
        git_proc = subprocess.Popen(git_reset_cmd.split(), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        git_proc.wait()
        git_proc_stderr = git_proc.stderr.read()
        if git_proc.returncode:
            print "Error while git reset to version:", ver
            print git_proc_stderr
            sys.exit(1)
    def create_tagfile(self, src_dir, fname):
        """Run ctags over *src_dir*, writing the tag file to *fname*."""
        # NOTE(review): wait() before reading a PIPE can deadlock if ctags
        # produces a lot of stderr output; communicate() would be safer.
        ctags_proc = subprocess.Popen([self.svar.ctags_exe, '-R', '--sort=yes',
            '--c++-kinds=cf', '--fields=aiknsz', '--language-force=C++',
            '--exclude=*.in', '--exclude=*.java', '--exclude=*.py',
            '--exclude=*.css', '--exclude=*.js', '--exclude=*.bmp',
            '-f', fname, src_dir],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        ctags_proc.wait()
        ctags_proc_stderr = ctags_proc.stderr.read()
        if ctags_proc_stderr:
            print "Error while creating tagfile with ctags:", fname
            print ctags_proc_stderr
            sys.exit(1)
    def generate_ctags(self):
        """Check out each requested version in turn and build one ctags file
        per version (stored in self.tag_f1 / self.tag_f2)."""
        # Store the current HEAD SHA-1 sum
        print "Getting the SHA-1 sum of HEAD of current working tree..."
        self._git_HEAD = self.git_current_version(self.src_dir)
        print "Getting the SHA-1 sum of HEAD of current working tree... %s" %\
            self._git_HEAD
        # Reset working tree to version1
        print "Checking out version %s..." % self.version1
        self.git_checkout_version(self.version1)
        # Create tag file for version1
        self.tag_f1 = self.gen_tagfile_name(self.version1)
        print "Creating tagfile (%s) for version %s..." % (self.tag_f1,
            self.version1)
        self.create_tagfile(self.src_dir, self.tag_f1)
        print "Creating tagfile (%s) for version %s...Done" % (self.tag_f1,
            self.version1)
        # Reset working tree to version2
        print "Checking out version %s..." % self.version2
        self.git_checkout_version(self.version2)
        # Create tag file for version2
        self.tag_f2 = self.gen_tagfile_name(self.version2)
        print "Creating tagfile (%s) for version %s..." % (self.tag_f2,
            self.version2)
        self.create_tagfile(self.src_dir, self.tag_f2)
        print "Creating tagfile (%s) for version %s...Done" % (self.tag_f2,
            self.version2)
    def gen_tagfile_name(self, ver):
        """Return the tag-file path for *ver*: <tmp>/<srcname>_<ver>.ctags."""
        tag_f_name = self.tmp_dir + os.sep + os.path.basename(self.src_dir) +\
            "_" + ver + '.ctags'
        return tag_f_name
    def git_current_version(self, src_dir):
        """Return the SHA-1 of HEAD in *src_dir* (as printed by rev-parse)."""
        git_cmd = self.svar.git_exe + ' --git-dir=' + src_dir + os.sep +\
            '.git --work-tree=' + src_dir + ' rev-parse HEAD'
        git_proc = subprocess.Popen(git_cmd.split(), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        git_proc.wait()
        git_proc_stderr = git_proc.stderr.read()
        if git_proc_stderr:
            print "Error while getting current HEAD SHA-1 sum:"
            print git_proc_stderr
            sys.exit(1)
        return git_proc.stdout.read()
    def git_diff_versions(self):
        """Write ``git diff version1..version2`` to a file in the temp dir and
        remember its path in self._git_diff."""
        git_cmd = self.svar.git_exe + ' --git-dir=' + self.src_dir + os.sep +\
            '.git --work-tree=' + self.src_dir + ' diff ' + self.version1 +\
            '..' + self.version2
        self._git_diff = self.tmp_dir + os.sep + os.path.basename(self.src_dir)\
            + "_" + self.version1 + "_" + self.version2 + ".diff"
        with open(self._git_diff, "w") as diff_file:
            git_proc = subprocess.Popen(git_cmd.split(), stdout=diff_file,
                stderr=subprocess.PIPE)
            git_proc.wait()
            git_proc_stderr = git_proc.stderr.read()
            if git_proc.returncode:
                print "Error while getting git diff between versions %s and %s" %\
                    (self.version1, self.version2)
                print git_proc_stderr
                sys.exit(1)
    def get_deprecated_methods(self):
        """Scan the inter-version diff for newly added VTK_LEGACY /
        VTK_LEGACY_BODY markers and record them in self.dep_methods as
        method -> [file, deprecated-as-of-version-or-"?"]."""
        fnameMatcher = re.compile('diff --git .* b/(.*)\n')
        depMatcher = re.compile('^\+.*VTK_LEGACY\((.*)\).*\n')
        rtDepMatcher = re.compile('^\+.*VTK_LEGACY_BODY\((.*),\s*"(.*)"\).*\n')
        self.git_diff_versions()
        fname = ''
        with open(self._git_diff, "r+") as diff_file:
            for line in diff_file:
                # Track which file the following hunk lines belong to.
                fnamematch = fnameMatcher.search(line)
                if fnamematch:
                    fname = fnamematch.expand('\\1').strip()#.split('/')[-1]
                # vtkSetGet.h defines the legacy macros themselves; skip it.
                if "vtkSetGet.h" in fname:
                    continue
                dep_diff = depMatcher.search(line)
                if dep_diff:
                    # Plain VTK_LEGACY gives no version; mark it unknown.
                    self.dep_methods[dep_diff.expand('\\1').strip()] = [fname, "?"]
                else:
                    dep_diff = rtDepMatcher.search(line)
                    if dep_diff:
                        self.dep_methods[dep_diff.expand('\\1').strip()] =\
                            [fname, dep_diff.expand('\\2').strip()]
    def setup_src_dir(self):
        """Clone the working tree into the temporary directory (local clone,
        no hardlinks); exits the script on failure."""
        print "Setting up a clone of working tree (%s) in %s..." %\
            (self.src_work_dir, self.src_dir)
        git_cmd = self.svar.git_exe + ' clone -l --no-hardlinks -q ' +\
            self.src_work_dir + ' ' + self.src_dir
        git_proc = subprocess.Popen(git_cmd.split(), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        git_proc.wait()
        git_proc_stderr = git_proc.stderr.read()
        if git_proc_stderr:
            print "Error while git clone into temporary working directory."
            print "Command Used:", git_cmd
            print git_proc_stderr
            sys.exit(1)
        print "Setting up a clone of working tree (%s) in %s...Done" %\
            (self.src_work_dir, self.src_dir)
    def cleanup(self):
        """Delete both tag files, the diff file, and the temporary clone."""
        if os.path.exists(self.tag_f1):
            os.remove(self.tag_f1)
        if os.path.exists(self.tag_f2):
            os.remove(self.tag_f2)
        if os.path.exists(self._git_diff):
            os.remove(self._git_diff)
        if os.path.exists(self.src_dir):
            shutil.rmtree(self.src_dir)
class SystemVar():
    """Locates the external executables (git, ctags) this script shells out to."""
    def __init__(self):
        self.git_exe = None    # full path to the git executable
        self.ctags_exe = None  # full path to the ctags executable
        self.get_exes()
    def get_exes(self):
        """Resolve both required executables, prompting the user if needed."""
        self.git_exe = self.get_exe("git")
        self.ctags_exe = self.get_exe("ctags")
    def get_exe(self, exe):
        """Return the full path to *exe*; search PATH first, then prompt the
        user up to three times, and exit the script if still not found."""
        exe_file = distutils.spawn.find_executable(exe)
        if not exe_file:
            print "%s not found in system PATH" % exe
            for trial in range(3):
                exe_file = raw_input("Enter full path to %s executable:\n"\
                    % exe)
                # Accept only an existing, executable file.
                if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
                    break
                else:
                    print "The provided path is not an executable"
                    exe_file = None
            if not exe_file:
                print "Could not locate %s executable in 3 tries. Exiting..."\
                    % exe
                sys.exit(1)
        return exe_file
def printReport(comp, args):
    """Print the comparison results, either as MediaWiki tables
    (args.wikiOutput) or as plain text sections."""
    # Remove location of temp dir from printout
    str_to_replace = os.path.abspath(args.tmp) + os.sep +\
        os.path.basename(args.src[0]) + os.sep
    def cleanListIndices(alist, parentList=None):
        # Remove stuff we don't care about and sort so module/kits are apparent
        def removeExtras(cls):
            # Predicate: keep entries outside of tests/examples/third-party,
            # and (when parentList is given) skip methods whose class was
            # itself added/removed.
            tmp = alist[cls]
            if isinstance(tmp, list):
                tmp = tmp[0]
            if parentList:
                # don't report new/removed methods that are part of new/removed classes
                clsname = tmp.split('/')[-1].split('.')[0]
                if clsname in parentList:
                    return False
            return ("Testing/Cxx" not in tmp and
                    "Examples" not in tmp and
                    "ThirdParty" not in tmp and
                    "Utilities" not in tmp)
        def getDir(cls):
            # Sort key: the file path relative to the temporary clone.
            tmp = alist[cls]
            if isinstance(tmp, list):
                tmp = tmp[0]
            return tmp.replace(str_to_replace, '')
        return sorted(
            filter(removeExtras, alist),
            key=lambda cls: getDir(cls))
    if args.wikiOutput:
        # MediaWiki output: one sortable table per category.
        print "==API differences when going from version %s to version %s=="\
            %(args.versions[0], args.versions[1])
        print "===Classes/Structs added in version %s===" %args.versions[1]
        print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
            'cellspacing="0"'
        print r'!Class Name'
        print r'!File'
        for cls in cleanListIndices(comp._classes_added):
            print r'|-'
            print r'|%s' %cls
            print r'|%s' %comp._classes_added[cls].replace(str_to_replace, '')
        print r'|}'
        print "===Classes/Structs removed from version %s===" %args.versions[0]
        print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
            'cellspacing="0"'
        print r'!Class Name'
        print r'!File'
        for cls in cleanListIndices(comp._classes_removed):
            print r'|-'
            print r'|%s' %cls
            print r'|%s' %comp._classes_removed[cls].replace(str_to_replace, '')
        print r'|}'
        print "===Public methods added in version %s===" %args.versions[1]
        print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
            'cellspacing="0"'
        print r'!Method'
        print r'!File'
        for cls in cleanListIndices(comp._pub_methods_added, comp._classes_added):
            print r'|-'
            print r'|%s' %cls
            print r'|%s' %comp._pub_methods_added[cls].replace(str_to_replace, '')
        print r'|}'
        print "===Public methods removed from version %s===" %args.versions[0]
        print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
            'cellspacing="0"'
        print r'!Method'
        print r'!File'
        for cls in cleanListIndices(comp._pub_methods_removed, comp._classes_removed):
            print r'|-'
            print r'|%s' %cls
            print r'|%s' %comp._pub_methods_removed[cls].replace(str_to_replace, '')
        print r'|}'
        print "==Deprecated Methods=="
        print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
            'cellspacing="0"'
        print r'!Method'
        print r'!Deprecated in'
        print r'!As of'
        for cls in cleanListIndices(comp.dep_methods):
            print r'|-'
            print r'|%s' %cls
            print r'|%s' %comp.dep_methods[cls][0]
            print r'|%s' %comp.dep_methods[cls][1]
        print r'|}'
    else:
        # Plain-text output: one section per category.
        print "-------------------- REPORT --------------------"
        print "==API differences when going from version %s to version %s=="\
            %(args.versions[0], args.versions[1])
        print "~~~~~~~~~~~~~Classes/Structs Added~~~~~~~~~~~~~~~"
        for cls in cleanListIndices(comp._classes_added):
            print "%s\t%s" %(cls, comp._classes_added[cls].replace(str_to_replace, ''))
        print "~~~~~~~~~~~~Classes/Structs Removed~~~~~~~~~~~~~~"
        for cls in cleanListIndices(comp._classes_removed):
            print "%s\t%s" %(cls, comp._classes_removed[cls].replace(str_to_replace, ''))
        print "~~~~~~~~~~~~~Public Methods Added~~~~~~~~~~~~~~~"
        for cls in cleanListIndices(comp._pub_methods_added, comp._classes_added):
            print "%s\t%s" %(cls, comp._pub_methods_added[cls].replace(str_to_replace, ''))
        print "~~~~~~~~~~~~Public Methods Removed~~~~~~~~~~~~~~"
        for cls in cleanListIndices(comp._pub_methods_removed, comp._classes_removed):
            print "%s\t%s" %(cls, comp._pub_methods_removed[cls].replace(str_to_replace, ''))
        print "~~~~~~~~~~~~~~Deprecated Methods~~~~~~~~~~~~~~~~~"
        for cls in cleanListIndices(comp.dep_methods):
            print "%s\tdeprecated in %s as of %s" %(cls, comp.dep_methods[cls][0], comp.dep_methods[cls][1])
def start(args):
    """Run the comparison described by the parsed command-line arguments."""
    # Clone the current working tree into the temp dir and do the comparison.
    comparator = CompareVersions(
        args.src[0], args.versions[0], args.versions[1], args.tmp)
    printReport(comparator, args)
    if args.dontClean:
        # Keep temporary files around, but restore the clone to its
        # original HEAD so it is left in a usable state.
        comparator.git_checkout_version(comparator._git_HEAD)
    else:
        comparator.cleanup()
if __name__ == '__main__':
    # Command-line interface: one source directory, two version identifiers,
    # plus options for the temp directory, wiki-format output, and keeping
    # temporary files around for inspection.
    parser = argparse.ArgumentParser(description='Compare two different'\
        ' revisions of a C++ source tree under GIT revision control')
    parser.add_argument("src", type=str, nargs=1,
        help="Path to the working directory")
    parser.add_argument("versions", type=str, nargs=2,
        help="Two versions to compare. (GIT SHA/branch/tag)")
    parser.add_argument("-t", "--tmp", type=str,
        default=tempfile.gettempdir(),
        help="Path to a temporary directory where the current working tree"\
        " can be cloned.(default: System Temp Directory")
    parser.add_argument("-w", "--wikiOutput", action='store_true',
        help="Print output with wiki markup.")
    parser.add_argument("-d", "--dontClean", action='store_true',
        help="Do not delete temporary files and directories.")
    args = parser.parse_args()
    start(args)
| bsd-3-clause |
jhseu/tensorflow | tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py | 9 | 6510 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
NON_PARALLEL = "non_parallel"
EXPERIMENTAL_PARALLEL = "experimental_parallel"
CORE_PARALLEL = "core_parallel"
def _make_fake_dataset_fn(initial_delay_us, remainder_delay_us):
  """Returns a dataset factory that emulates a remote storage data source.

  The factory builds a 100-element dataset that mimics the performance
  characteristics of a file stored in remote storage: producing the first
  element can be made an order of magnitude slower than producing the rest.

  Args:
    initial_delay_us: How long to wait before producing the first element.
    remainder_delay_us: How long to wait before producing subsequent elements.
  """
  def _delayed_range(delay_us, count):
    # Shared helper for both the slow "open file" prefix and the fast tail.
    ds = dataset_ops.Dataset.range(count)
    return ds.apply(testing.sleep(delay_us)) if delay_us > 0 else ds

  def fake_dataset_fn(_):
    """Creates the fake dataset; the single positional argument is ignored."""
    body = _delayed_range(remainder_delay_us, 100)
    if initial_delay_us:
      # An empty range carrying the initial delay, followed by the payload.
      return _delayed_range(initial_delay_us, 0).concatenate(body)
    return body

  return fake_dataset_fn
class ParallelInterleaveBenchmark(test.Benchmark):
  """Benchmarks for `tf.data.experimental.parallel_interleave()`."""
  def apply_interleave(self, interleave_version, dataset, interleave_fn,
                       cycle_length, num_parallel_calls):
    # Applies `interleave_fn` to `dataset` using the implementation selected
    # by `interleave_version` (one of the module-level version constants).
    if interleave_version == NON_PARALLEL:
      return dataset.interleave(interleave_fn, cycle_length=cycle_length)
    elif interleave_version == EXPERIMENTAL_PARALLEL:
      return dataset.apply(
          interleave_ops.parallel_interleave(
              interleave_fn, cycle_length=cycle_length))
    elif interleave_version == CORE_PARALLEL:
      # Core interleave decouples parallelism from cycle length; default to
      # one parallel call per cycle slot when the caller did not specify.
      if not num_parallel_calls:
        num_parallel_calls = cycle_length
      return dataset.interleave(
          interleave_fn,
          cycle_length=cycle_length,
          num_parallel_calls=num_parallel_calls)
    else:
      raise ValueError("Unknown version: " + interleave_version)
  def make_dataset(self,
                   interleave_version,
                   initial_delay,
                   remainder_delay,
                   cycle_length,
                   num_parallel_calls=None):
    # Builds an endlessly repeating dataset whose elements come from the
    # fake "remote file" datasets produced by `_make_fake_dataset_fn`.
    dataset = dataset_ops.Dataset.range(1).repeat()
    interleave_fn = _make_fake_dataset_fn(initial_delay, remainder_delay)
    return self.apply_interleave(interleave_version, dataset, interleave_fn,
                                 cycle_length, num_parallel_calls)
  def _benchmark(self,
                 interleave_version,
                 num_elements,
                 initial_delay_us=0,
                 remainder_delay_us=0,
                 cycle_length=10,
                 iters=100,
                 num_parallel_calls=None,
                 attach_stats_aggregator=False,
                 name=None):
    # Times producing `num_elements` elements: the pipeline skips that many
    # elements, so fetching the first remaining one forces the interleave to
    # produce (and discard) all of them.
    ds = self.make_dataset(interleave_version, initial_delay_us,
                           remainder_delay_us, cycle_length, num_parallel_calls)
    if attach_stats_aggregator:
      aggregator = stats_aggregator.StatsAggregator()
      opts = dataset_ops.Options()
      opts.experimental_stats.aggregator = aggregator
      ds = ds.with_options(opts)
    ds = ds.skip(num_elements)
    deltas = []
    for _ in range(iters):
      start = time.time()
      # A fresh iterator each rep; `next` blocks until `num_elements`
      # elements have been produced and skipped.
      next(iter(ds))
      deltas.append(time.time() - start)
    self.report_benchmark(iters=iters, wall_time=np.median(deltas), name=name)
  def benchmark_remote_file_simulation(self):
    # Emulates file-based sources where "opening" is ~100x slower (100ms)
    # than reading subsequent records (1ms).
    for version in [EXPERIMENTAL_PARALLEL, CORE_PARALLEL]:
      self._benchmark(
          version,
          initial_delay_us=100 * 1000,
          remainder_delay_us=1000,
          num_elements=5000,
          name="remote_file_simulation_" + version)
  def benchmark_fast_input(self):
    # Input with no artificial delays; measures per-element overhead only.
    for version in [EXPERIMENTAL_PARALLEL, CORE_PARALLEL]:
      self._benchmark(
          version, num_elements=200000, name="fast_input_" + version)
  # Measure the overhead of parallel interleaves compared to non-parallel
  # interleave.
  def benchmark_single_cycle(self):
    for version in [NON_PARALLEL, EXPERIMENTAL_PARALLEL, CORE_PARALLEL]:
      self._benchmark(
          version,
          cycle_length=1,
          num_elements=200000,
          name="single_cycle_" + version)
  # Compare with a more reasonable cycle length. Experimental interleave
  # cannot be compared here because it sets num_parallel_calls = cycle_length.
  def benchmark_single_parallel_call(self):
    self._benchmark(
        CORE_PARALLEL,
        num_elements=200000,
        num_parallel_calls=1,
        name="single_parallel_call_" + CORE_PARALLEL)
  def benchmark_long_cycle(self):
    # Stresses bookkeeping with a very large number of concurrently open
    # input datasets.
    for version in [EXPERIMENTAL_PARALLEL, CORE_PARALLEL]:
      self._benchmark(
          version,
          cycle_length=1000,
          num_elements=100000,
          name="long_cycle_" + version)
  def benchmark_stats(self):
    # Measures the added cost of attaching a StatsAggregator to the pipeline.
    self._benchmark(
        CORE_PARALLEL,
        cycle_length=50,
        num_elements=1000,
        name="stats",
        attach_stats_aggregator=True)
if __name__ == "__main__":
  # The benchmarks iterate datasets with `next(iter(ds))`, which requires
  # eager execution.
  ops.enable_eager_execution()
  test.main()
| apache-2.0 |
HarborYuan/cashier | env/Lib/encodings/cp1253.py | 272 | 13094 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1253 codec backed by the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* (str) to cp1253 bytes; returns (bytes, consumed)."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode cp1253 *input* (bytes) to str; returns (str, consumed)."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode a chunk; cp1253 is stateless, so *final* is ignored."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode a chunk; cp1253 is stateless, so *final* is ignored."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the Codec/StreamWriter bases.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the Codec/StreamReader bases.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register the cp1253 codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1253',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\ufffe' # 0xAA -> UNDEFINED
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u2015' # 0xAF -> HORIZONTAL BAR
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u0384' # 0xB4 -> GREEK TONOS
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
'\ufffe' # 0xD2 -> UNDEFINED
'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
'\u03bd' # 0xED -> GREEK SMALL LETTER NU
'\u03be' # 0xEE -> GREEK SMALL LETTER XI
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse mapping of decoding_table, built once at import time.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
tysonclugg/django | tests/queries/models.py | 27 | 16763 | """
Various complex queries that have been problematic in the past.
"""
import threading
from django.db import models
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
'self',
models.SET_NULL,
blank=True, null=True,
related_name='children',
)
category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpicklable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note, models.CASCADE, null=True)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo, models.CASCADE)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author, models.CASCADE)
note = models.ForeignKey(Note, models.CASCADE)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True)
def __str__(self):
return self.name
class ReportComment(models.Model):
report = models.ForeignKey(Report, models.CASCADE)
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item, models.CASCADE)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return str(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y', models.CASCADE)
class Y(models.Model):
x1 = models.ForeignKey(X, models.CASCADE, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY', models.CASCADE)
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX, models.CASCADE)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self', models.CASCADE)
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(public=True, tag__name='t1')
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super().get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk, models.CASCADE, null=True)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity, models.CASCADE)
# Multiple foreign keys
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA, models.CASCADE)
b = models.ForeignKey(LeafB, models.CASCADE)
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
# Multi-layer ordering
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class Eaten(models.Model):
food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.CASCADE)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk')
def __str__(self):
return self.name
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory, models.CASCADE)
def __str__(self):
return "category item: " + str(self.category)
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory, models.CASCADE)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC, models.CASCADE)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
d = models.ForeignKey(ModelD, models.CASCADE)
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, models.CASCADE, to_field='name')
responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description')
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, models.SET_NULL, null=True)
b = models.ForeignKey(FK2, models.SET_NULL, null=True)
c = models.ForeignKey(FK3, models.SET_NULL, null=True)
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter', models.CASCADE)
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph', models.CASCADE)
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class OrderItem(models.Model):
order = models.ForeignKey(Order, models.CASCADE, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner')
creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator')
def __str__(self):
return self.title
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class StaffUser(BaseUser):
    staff = models.OneToOneField(Staff, models.CASCADE, related_name='user')

    def __str__(self):
        # Bug fix: ``__str__`` must return a str, but ``self.staff`` is a
        # Staff instance, so ``str(instance)`` raised
        # ``TypeError: __str__ returned non-string``. Delegate to
        # ``Staff.__str__`` (which returns its ``name``) instead.
        return str(self.staff)
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)
class Person(models.Model):
name = models.CharField(max_length=128)
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company, models.CASCADE)
employee = models.ForeignKey(Person, models.CASCADE)
title = models.CharField(max_length=128)
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School, models.CASCADE)
class Classroom(models.Model):
name = models.CharField(max_length=20)
has_blackboard = models.NullBooleanField()
school = models.ForeignKey(School, models.CASCADE)
students = models.ManyToManyField(Student, related_name='classroom')
class Teacher(models.Model):
schools = models.ManyToManyField(School)
friends = models.ManyToManyField('self')
class Ticket23605AParent(models.Model):
pass
class Ticket23605A(Ticket23605AParent):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
alive = models.BooleanField()
class Meta:
db_table = 'Individual'
class RelatedIndividual(models.Model):
related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')
class Meta:
db_table = 'RelatedIndividual'
| bsd-3-clause |
manishpatell/erpcustomizationssaiimpex123qwe | addons/base_action_rule/__openerp__.py | 52 | 2009 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Action Rules',
'version': '1.0',
'category': 'Sales Management',
'description': """
This module allows to implement action rules for any object.
============================================================
Use automated actions to automatically trigger actions for various screens.
**Example:** A lead created by a specific user may be automatically set to a specific
sales team, or an opportunity which still has status pending after 14 days might
trigger an automatic reminder email.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base', 'resource', 'mail'],
'data': [
'base_action_rule_data.xml',
'base_action_rule_view.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': True,
'auto_install': False,
'images': ['images/base_action_rule1.jpeg','images/base_action_rule2.jpeg','images/base_action_rule3.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fuhongliang/odoo | addons/hr_recruitment/res_config.py | 352 | 3627 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-Today OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_applicant_settings(osv.TransientModel):
    """Recruitment configuration wizard.

    Extends the HR settings wizard (and the fetchmail one) with an optional
    resume-indexation module and a default mail alias for job applications.
    """
    _name = 'hr.config.settings'
    _inherit = ['hr.config.settings', 'fetchmail.config.settings']
    _columns = {
        'module_document': fields.boolean('Allow the automatic indexation of resumes',
            help='Manage your CV\'s and motivation letter related to all applicants.\n'
                 '-This installs the module document_ftp. This will install the knowledge management module in order to allow you to search using specific keywords through the content of all documents (PDF, .DOCx...)'),
        'alias_prefix': fields.char('Default Alias Name for Jobs'),
        'alias_domain': fields.char('Alias Domain'),
    }
    _defaults = {
        # Domain part is taken from the global mail.alias configuration.
        'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
    }
    def _find_default_job_alias_id(self, cr, uid, context=None):
        # Prefer the alias shipped in this module's XML data; otherwise fall
        # back to any generic hr.applicant alias not tied to a record.
        alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'hr_recruitment.mail_alias_jobs')
        if not alias_id:
            alias_ids = self.pool['mail.alias'].search(
                cr, uid, [
                    ('alias_model_id.model', '=', 'hr.applicant'),
                    ('alias_force_thread_id', '=', False),
                    ('alias_parent_model_id.model', '=', 'hr.job'),
                    ('alias_parent_thread_id', '=', False),
                    ('alias_defaults', '=', '{}')
                ], context=context)
            alias_id = alias_ids and alias_ids[0] or False
        return alias_id
    def get_default_alias_prefix(self, cr, uid, ids, context=None):
        # Settings getter: read the local-part of the default job alias, if any.
        alias_name = False
        alias_id = self._find_default_job_alias_id(cr, uid, context=context)
        if alias_id:
            alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
        return {'alias_prefix': alias_name}
    def set_default_alias_prefix(self, cr, uid, ids, context=None):
        # Settings setter: create the default job alias if missing, else
        # rename the existing one.
        mail_alias = self.pool.get('mail.alias')
        for record in self.browse(cr, uid, ids, context=context):
            alias_id = self._find_default_job_alias_id(cr, uid, context=context)
            if not alias_id:
                create_ctx = dict(context, alias_model_name='hr.applicant', alias_parent_model_name='hr.job')
                alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
            else:
                mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
        return True
| agpl-3.0 |
wang1352083/pythontool | python-2.7.12-lib/distutils/tests/test_ccompiler.py | 42 | 2356 | """Tests for distutils.ccompiler."""
import os
import unittest
from test.test_support import captured_stdout
from distutils.ccompiler import (gen_lib_options, CCompiler,
get_default_compiler)
from distutils.sysconfig import customize_compiler
from distutils import debug
from distutils.tests import support
class FakeCompiler(object):
    """Minimal stand-in compiler exposing only the hooks gen_lib_options needs."""

    def library_dir_option(self, dir):
        # Library search-path flag, e.g. "-Llib1".
        return "-L%s" % dir

    def runtime_library_dir_option(self, dir):
        # Runtime search path may legitimately expand to several flags.
        return ["-cool", "-R%s" % dir]

    def find_library_file(self, dirs, lib, debug=0):
        # Pretend every library can be located.
        return 'found'

    def library_option(self, lib):
        # Link-against-library flag, e.g. "-lname2".
        return "-l%s" % lib
class CCompilerTestCase(support.EnvironGuard, unittest.TestCase):
    """Unit tests for helpers in distutils.ccompiler."""
    def test_gen_lib_options(self):
        # gen_lib_options must emit dir flags, runtime-dir flags, then the
        # resolved libraries, in that order.
        compiler = FakeCompiler()
        libdirs = ['lib1', 'lib2']
        runlibdirs = ['runlib1']
        libs = [os.path.join('dir', 'name'), 'name2']
        opts = gen_lib_options(compiler, libdirs, runlibdirs, libs)
        wanted = ['-Llib1', '-Llib2', '-cool', '-Rrunlib1', 'found',
                  '-lname2']
        self.assertEqual(opts, wanted)
    def test_debug_print(self):
        # debug_print writes to stdout only while the global debug.DEBUG
        # flag is set.
        class MyCCompiler(CCompiler):
            executables = {}
        compiler = MyCCompiler()
        with captured_stdout() as stdout:
            compiler.debug_print('xxx')
        stdout.seek(0)
        self.assertEqual(stdout.read(), '')
        debug.DEBUG = True
        try:
            with captured_stdout() as stdout:
                compiler.debug_print('xxx')
            stdout.seek(0)
            self.assertEqual(stdout.read(), 'xxx\n')
        finally:
            # Always restore the global so other tests are unaffected.
            debug.DEBUG = False
    @unittest.skipUnless(get_default_compiler() == 'unix',
                         'not testing if default compiler is not unix')
    def test_customize_compiler(self):
        # EnvironGuard restores os.environ after the test.
        os.environ['AR'] = 'my_ar'
        os.environ['ARFLAGS'] = '-arflags'
        # make sure AR gets caught
        class compiler:
            compiler_type = 'unix'
            def set_executables(self, **kw):
                self.exes = kw
        comp = compiler()
        customize_compiler(comp)
        self.assertEqual(comp.exes['archiver'], 'my_ar -arflags')
def test_suite():
    # Aggregate this module's tests, following distutils' test-runner convention.
    return unittest.makeSuite(CCompilerTestCase)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main(defaultTest="test_suite")
| mit |
qwefi/nova | nova/console/vmrc_manager.py | 13 | 6065 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VMRC Console Manager."""
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova import manager
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import driver as vmwareapi_conn
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Pull the shared console options defined by nova.console.manager into CONF.
CONF.import_opt('console_driver', 'nova.console.manager')
CONF.import_opt('console_public_hostname', 'nova.console.manager')
class ConsoleVMRCManager(manager.Manager):
    """Manager to handle VMRC connections for accessing instance consoles."""
    def __init__(self, console_driver=None, *args, **kwargs):
        # The concrete console driver class is configurable (CONF.console_driver).
        self.driver = importutils.import_object(CONF.console_driver)
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        super(ConsoleVMRCManager, self).__init__(*args, **kwargs)
    def init_host(self):
        # Cache of VMware VIM sessions, keyed by console-pool id.
        self.sessions = {}
        self.driver.init_host()
    def _get_vim_session(self, pool):
        """Get VIM session for the pool specified."""
        # Sessions are created lazily and reused for subsequent requests.
        vim_session = None
        if pool['id'] not in self.sessions.keys():
            vim_session = vmwareapi_conn.VMwareAPISession(
                pool['address'],
                pool['username'],
                pool['password'],
                CONF.console_vmrc_error_retries)
            self.sessions[pool['id']] = vim_session
        return self.sessions[pool['id']]
    def _generate_console(self, context, pool, name, instance_id, instance):
        """Sets up console for the instance."""
        LOG.debug(_('Adding console'))
        password = self.driver.generate_password(
            self._get_vim_session(pool),
            pool,
            instance.name)
        console_data = {'instance_name': name,
                        'instance_id': instance_id,
                        'password': password,
                        'pool_id': pool['id']}
        console_data['port'] = self.driver.get_port(context)
        console = self.db.console_create(context, console_data)
        self.driver.setup_console(context, console)
        return console
    def add_console(self, context, instance_id, password=None,
                    port=None, **kwargs):
        """Adds a console for the instance.
        If it is one time password, then we generate new console credentials.
        """
        instance = self.db.instance_get(context, instance_id)
        host = instance['host']
        name = instance['name']
        pool = self.get_pool_for_instance_host(context, host)
        try:
            console = self.db.console_get_by_pool_instance(context,
                                                      pool['id'],
                                                      instance['uuid'])
            if self.driver.is_otp():
                # One-time passwords: refresh credentials on every request.
                console = self._generate_console(context,
                                                 pool,
                                                 name,
                                                 instance_id,
                                                 instance)
        except exception.NotFound:
            # No console recorded yet for this pool/instance: create one.
            console = self._generate_console(context,
                                             pool,
                                             name,
                                             instance_id,
                                             instance)
        return console['id']
    def remove_console(self, context, console_id, **_kwargs):
        """Removes a console entry."""
        try:
            console = self.db.console_get(context, console_id)
        except exception.NotFound:
            LOG.debug(_('Tried to remove non-existent console '
                        '%(console_id)s.') % {'console_id': console_id})
            return
        LOG.debug(_('Removing console '
                    '%(console_id)s.') % {'console_id': console_id})
        self.db.console_delete(context, console_id)
        self.driver.teardown_console(context, console)
    def get_pool_for_instance_host(self, context, instance_host):
        """Gets console pool info for the instance."""
        context = context.elevated()
        console_type = self.driver.console_type
        try:
            pool = self.db.console_pool_get_by_host_type(context,
                                                         instance_host,
                                                         self.host,
                                                         console_type)
        except exception.NotFound:
            # No pool exists yet: ask the compute node for connection info
            # and persist a new pool record.
            pool_info = self.compute_rpcapi.get_console_pool_info(context,
                    console_type, instance_host)
            pool_info['password'] = self.driver.fix_pool_password(
                    pool_info['password'])
            pool_info['host'] = self.host
            # ESX Address or Proxy Address
            public_host_name = pool_info['address']
            if CONF.console_public_hostname:
                public_host_name = CONF.console_public_hostname
            pool_info['public_hostname'] = public_host_name
            pool_info['console_type'] = console_type
            pool_info['compute_host'] = instance_host
            pool = self.db.console_pool_create(context, pool_info)
        return pool
| apache-2.0 |
james4424/nest-simulator | topology/examples/connex.py | 4 | 2161 | # -*- coding: utf-8 -*-
#
# connex.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers of iaf_neurons,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import nest.topology as topo
import pylab
# Interactive plotting so the figure updates as we draw.
pylab.ion()
nest.ResetKernel()
# create two test layers
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_neuron'})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_neuron'})
# Divergent connections: circular mask of radius 0.5, flat 0.5 connection
# probability, uniformly distributed weights, fixed delay.
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 0.5}},
            'kernel': 0.5,
            'weights': {'uniform': {'min': 0.5, 'max': 2.0}},
            'delays': 1.0}
topo.ConnectLayers(a, b, conndict)
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
pylab.clf()
fig = pylab.gcf()
# plot targets of two source neurons into same figure, with mask
for src_pos in [[15, 15], [0, 0]]:
    # obtain node id for center
    src = topo.GetElement(a, src_pos)
    topo.PlotTargets(src, b, mask=conndict['mask'], fig=fig)
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-2.0, 2.0, -2.0, 2.0])
pylab.axes().set_aspect('equal', 'box')
pylab.title('Connection targets')
# pylab.savefig('connex.pdf')
| gpl-2.0 |
txiner/db-xiner | integration_test/test_project_order.py | 1 | 3192 | import unittest
from disco.core import result_iterator
from hustle import select, Table
from setup import IMPS
from hustle.core.settings import Settings, overrides
class TestProjectOrder(unittest.TestCase):
    """Integration tests for hustle's ORDER BY / LIMIT / DISTINCT handling."""
    def setUp(self):
        # Point hustle at the local disco master; disable dump/nest output.
        overrides['server'] = 'disco://localhost'
        overrides['dump'] = False
        overrides['nest'] = False
        self.settings = Settings()
    def tearDown(self):
        pass
    def test_single_int_order(self):
        # Rows must come back in non-decreasing cpm_millis order.
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, imps.cpm_millis, where=imps.date == '2014-01-27', order_by=imps.cpm_millis)
        lowest = 0
        for (a, d, c), _ in result_iterator(res):
            self.assertLessEqual(lowest, c)
            lowest = c
    def test_combo_order(self):
        # Primary sort on date, secondary sort on cpm_millis within a date.
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, imps.cpm_millis,
                     where=imps.date > '2014-01-21',
                     order_by=(imps.date, imps.cpm_millis))
        lowest_cpm = 0
        lowest_date = '2000-01-01'
        for (a, d, c), _ in result_iterator(res):
            if lowest_date == d:
                self.assertLessEqual(lowest_cpm, c)
                lowest_cpm = c
            else:
                self.assertLessEqual(lowest_date, d)
                lowest_date = d
                lowest_cpm = c
    def test_combo_descending(self):
        # Same combo ordering, but reversed via desc=True.
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, imps.cpm_millis,
                     where=imps.date > '2014-01-21',
                     order_by=(imps.date, imps.cpm_millis),
                     desc=True)
        highest_cpm = 1000000000
        highest_date = '2222-01-01'
        for (a, d, c), _ in result_iterator(res):
            if highest_date == d:
                self.assertGreaterEqual(highest_cpm, c)
                highest_cpm = c
            else:
                self.assertGreaterEqual(highest_date, d)
                highest_date = d
                highest_cpm = c
    def test_high_limit(self):
        # A limit above the row count returns all matching rows (10 here).
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, imps.cpm_millis, where=imps.date == '2014-01-27', limit=100)
        results = [c for c, _ in result_iterator(res)]
        self.assertEqual(len(results), 10)
    def test_low_limit(self):
        # A limit below the row count truncates the result set.
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, imps.cpm_millis, where=imps.date == '2014-01-27', limit=4)
        results = [c for c, _ in result_iterator(res)]
        self.assertEqual(len(results), 4)
    def test_distinct(self):
        # DISTINCT collapses duplicate (ad_id, date) pairs.
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, where=imps.date == '2014-01-27', distinct=True)
        results = [c for c, _ in result_iterator(res)]
        self.assertEqual(len(results), 8)
    def test_overall(self):
        # DISTINCT + LIMIT + descending ORDER BY combined in one query.
        imps = Table.from_tag(IMPS)
        res = select(imps.ad_id, imps.date, where=imps.date == '2014-01-27', distinct=True, limit=4,
                     order_by='ad_id', desc=True)
        results = [a for (a, d), _ in result_iterator(res)]
        self.assertEqual(len(results), 4)
        self.assertListEqual(results, [30019, 30018, 30017, 30015])
| mit |
adamjmcgrath/glancydesign | django/contrib/gis/geos/base.py | 321 | 1698 | from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
    from django.contrib.gis import gdal
except ImportError:
    # A 'dummy' gdal module.
    class GDALInfo(object):
        # Falsy sentinel flags so feature checks work without GDAL installed.
        HAS_GDAL = False
        GEOJSON = False
    gdal = GDALInfo()
# NumPy supported?
try:
    import numpy
except ImportError:
    # Falsy placeholder; presumably tested with `if numpy:` before use.
    numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
# Raise an exception if the pointer isn't valid don't
# want to be passing NULL pointers to routines --
# that's very bad.
if self._ptr: return self._ptr
else: raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if isinstance(ptr, (self.ptr_type, NoneType)):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
| bsd-3-clause |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32comext/shell/demos/servers/copy_hook.py | 37 | 2888 | # A sample shell copy hook.
# To demostrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer
# * Attempt to move or copy a directory.
# * Note our hook's dialog is displayed.
import sys, os
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
import winerror
# Our shell extension.
class ShellExtension:
    # COM copy-hook handler: asks the user to confirm each folder copy/move.
    _reg_progid_ = "Python.ShellExtension.CopyHook"
    _reg_desc_ = "Python Sample Shell Extension (copy hook)"
    _reg_clsid_ = "{1845b6ba-2bbd-4197-b930-46d8651497c1}"
    _com_interfaces_ = [shell.IID_ICopyHook]
    _public_methods_ = ["CopyCallBack"]
    def CopyCallBack(self, hwnd, func, flags,
                     srcName, srcAttr, destName, destAttr):
        # Called by the shell before a folder operation proceeds.
        # This function should return:
        # IDYES Allows the operation.
        # IDNO Prevents the operation on this folder but continues with any other operations that have been approved (for example, a batch copy operation).
        # IDCANCEL Prevents the current operation and cancels any pending operations.
        print "CopyCallBack", hwnd, func, flags, srcName, srcAttr, destName, destAttr
        return win32gui.MessageBox(hwnd, "Allow operation?", "CopyHook",
                                   win32con.MB_YESNO)
def DllRegisterServer():
    # Register the copy hook for both directories and all file types ("*").
    import _winreg
    key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
                            "directory\\shellex\\CopyHookHandlers\\" +
                            ShellExtension._reg_desc_)
    _winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ShellExtension._reg_clsid_)
    key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
                            "*\\shellex\\CopyHookHandlers\\" +
                            ShellExtension._reg_desc_)
    _winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ShellExtension._reg_clsid_)
    print ShellExtension._reg_desc_, "registration complete."
def DllUnregisterServer():
    # Remove both registry registrations; a missing key (ENOENT) is fine,
    # any other registry error is re-raised.
    import _winreg
    try:
        key = _winreg.DeleteKey(_winreg.HKEY_CLASSES_ROOT,
                                "directory\\shellex\\CopyHookHandlers\\" +
                                ShellExtension._reg_desc_)
    except WindowsError, details:
        import errno
        if details.errno != errno.ENOENT:
            raise
    try:
        key = _winreg.DeleteKey(_winreg.HKEY_CLASSES_ROOT,
                                "*\\shellex\\CopyHookHandlers\\" +
                                ShellExtension._reg_desc_)
    except WindowsError, details:
        import errno
        if details.errno != errno.ENOENT:
            raise
    print ShellExtension._reg_desc_, "unregistration complete."
if __name__=='__main__':
    from win32com.server import register
    # Let win32com handle the --register/--unregister command line.
    register.UseCommandLine(ShellExtension,
                            finalize_register = DllRegisterServer,
                            finalize_unregister = DllUnregisterServer)
#!/usr/bin/env python
| apache-2.0 |
yangchaogit/shadowsocks | tests/coverage_server.py | 1072 | 1655 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
    # Imports are deferred so importing this module never drags in tornado.
    import tornado.ioloop
    import tornado.web
    import urllib

    class MainHandler(tornado.web.RequestHandler):
        """Redirects to a shields.io coverage badge for the given project."""

        def get(self, project):
            try:
                # Coverage files are written externally, e.g. "85%".
                with open('/tmp/%s-coverage' % project, 'rb') as f:
                    coverage = f.read().strip()
                n = int(coverage.strip('%'))
                # Green badge at >= 80% coverage, yellow otherwise.
                if n >= 80:
                    color = 'brightgreen'
                else:
                    color = 'yellow'
                self.redirect(('https://img.shields.io/badge/'
                               'coverage-%s-%s.svg'
                               '?style=flat') %
                              (urllib.quote(coverage), color))
            except IOError:
                # No coverage file recorded for this project.
                raise tornado.web.HTTPError(404)

    application = tornado.web.Application([
        (r"/([a-zA-Z0-9\-_]+)", MainHandler),
    ])
    # Note: the original wrapped the two lines below in a second, redundant
    # `if __name__ == "__main__"` check nested inside this one; that inner
    # guard was always true here, so it has been removed.
    application.listen(8888, address='127.0.0.1')
    tornado.ioloop.IOLoop.instance().start()
| apache-2.0 |
edoakes/pyscrape | parsing/parse_repos.py | 1 | 2547 | import os, requests, json, subprocess, time, pymongo, signal, sys
from multiprocessing import Process, Queue, Lock, Value, current_process
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, '..'))
from ..parse import parse_files
# Root directory under which repositories were cloned (sharded by id digits).
clone_dir = '/home/data/repos'
# Module-level Mongo connection. Bug fix: `client` was referenced below
# without ever being defined at module scope (parser() only creates a
# local one), so importing this module raised NameError immediately.
client = pymongo.MongoClient()
# Work queue: repos still to parse.
fromdb = client.metadata.copy
# Destination for fully-parsed repo documents.
todb = client.full.repos
# Subset of repos whose Python files import django.
djangodb = client.full.django
# Number of parallel parser processes to spawn.
parsers = 12
def parser(lock):
    """Worker process: repeatedly pop a repo from `fromdb`, parse its clone
    on disk, and store the enriched document in `todb` (and `djangodb` if it
    imports django). Returns when the queue collection is empty."""
    client = pymongo.MongoClient()
    while True:
        # Pop one repo atomically under the shared lock so two workers
        # never claim the same document.
        with lock:
            repo = fromdb.find_one()
            if not repo:
                return
            fromdb.delete_one({'id': repo['id']})
        # Clones are sharded by the first two digits of the repo id.
        path = os.path.join(clone_dir, str(repo['id'])[0], str(repo['id'])[1], str(repo['id']))
        if not os.path.isdir(path):
            # Clone missing on disk: drop its "cloned" record and move on.
            client.metadata.cloned.delete_one({'id': repo['id']})
            continue
        start = time.time()
        parse_results = scrape(path)
        t = time.time() - start
        print('parsing took %fs' % t)
        # Total Python line count across all parsed files.
        pylines = 0
        for f in parse_results[0]:
            pylines += f['lines']
        repo['pyfiles'] = parse_results[0]
        repo['filetypes'] = parse_results[1]
        repo['pylines'] = pylines
        todb.insert_one(repo)
        # Flag repos that import django anywhere.
        for f in repo['pyfiles']:
            if 'django' in f['mods']:
                djangodb.insert_one(repo)
                break
def scrape(path):
    """Walk `path` collecting .py files to parse and per-extension file sizes.

    Returns [parse_files(python_files), {extension: [sizes...]}].
    """
    py_sources = []
    sizes_by_type = {}
    stack = [path]
    while stack:
        directory = stack.pop()
        for entry in os.listdir(directory):
            full_path = os.path.join(directory, entry)
            # Skip symlinks entirely to avoid cycles and double counting.
            if os.path.islink(full_path):
                continue
            if os.path.isdir(full_path):
                stack.append(full_path)
                continue
            if not os.path.isfile(full_path):
                continue
            if full_path.endswith('.py'):
                py_sources.append(full_path)
            # Bucket every file's size under its (lowercased) extension;
            # extensionless files go under 'none'.
            extension = os.path.splitext(full_path)[1]
            file_type = extension.strip('.').lower() if extension else 'none'
            sizes_by_type.setdefault(file_type, []).append(
                os.path.getsize(full_path))
    return [parse_files(py_sources), sizes_by_type]
def main():
    """Spawn the parser worker pool, wait for it to drain the queue, and
    return the elapsed wall-clock seconds."""
    started_at = time.time()
    db_lock = Lock()
    workers = []
    for _ in range(parsers):
        worker = Process(target=parser, args=(db_lock,))
        worker.start()
        workers.append(worker)
    # Wait for every worker, then make sure each one is torn down.
    for worker in workers:
        worker.join()
        worker.terminate()
    return time.time() - started_at
if __name__ == '__main__':
    t = main()
    # Report total wall-clock time for the whole parsing run.
    print('finished parsing in %fs' % t)
| apache-2.0 |
embeddedarm/android_external_chromium_org | tools/multi-process-rss.py | 128 | 3646 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Counts a resident set size (RSS) of multiple processes without double-counts.
# If they share the same page frame, the page frame is counted only once.
#
# Usage:
# ./multi-process-rss.py <pid>|<pid>r [...]
#
# If <pid> has 'r' at the end, all descendants of the process are accounted.
#
# Example:
# ./multi-process-rss.py 12345 23456r
#
# The command line above counts the RSS of 1) process 12345, 2) process 23456
# and 3) all descendant processes of process 23456.
import collections
import logging
import os
import psutil
import sys
if sys.platform.startswith('linux'):
  # The procfs helper module lives in tools/linux; extend sys.path so it
  # can be imported on Linux only.
  _TOOLS_PATH = os.path.dirname(os.path.abspath(__file__))
  _TOOLS_LINUX_PATH = os.path.join(_TOOLS_PATH, 'linux')
  sys.path.append(_TOOLS_LINUX_PATH)
  import procfs  # pylint: disable=F0401
class _NullHandler(logging.Handler):
  # Discards every record; attached so the logger never warns "no handler".
  def emit(self, record):
    pass
_LOGGER = logging.getLogger('multi-process-rss')
_LOGGER.addHandler(_NullHandler())
def _recursive_get_children(pid):
  """Return the PIDs of all descendants of |pid|, depth-first."""
  try:
    direct_children = psutil.Process(pid).get_children()
  except psutil.error.NoSuchProcess:
    # The process exited while we were walking the tree.
    return []
  pids = []
  for child in direct_children:
    pids += [child.pid] + _recursive_get_children(child.pid)
  return pids
def list_pids(argv):
  """Parse PID arguments from argv; a trailing 'r' also adds all descendants.

  Raises SyntaxError (matching the original behavior) for non-integer args.
  """
  pids = []
  for arg in argv[1:]:
    recursive = arg.endswith('r')
    numeric_part = arg[:-1] if recursive else arg
    try:
      pid = int(numeric_part)
    except ValueError:
      raise SyntaxError("%s is not an integer." % arg)
    pids.append(pid)
    if recursive:
      pids.extend(_recursive_get_children(pid))
  # De-duplicate while keeping first-seen order.
  return sorted(set(pids), key=pids.index)
def count_pageframes(pids):
  """Tally how many times each physical page frame is mapped across |pids|."""
  frame_counts = collections.defaultdict(int)
  pagemaps = {}
  # First pass: load /proc/<pid>/{maps,pagemap} for every readable process,
  # warning (not failing) on the ones that are gone or unreadable.
  for pid in pids:
    maps = procfs.ProcMaps.load(pid)
    if not maps:
      _LOGGER.warning('/proc/%d/maps not found.' % pid)
      continue
    pagemap = procfs.ProcPagemap.load(pid, maps)
    if not pagemap:
      _LOGGER.warning('/proc/%d/pagemap not found.' % pid)
      continue
    pagemaps[pid] = pagemap
  # Second pass: accumulate mapping counts per physical page frame.
  for pid, pagemap in pagemaps.iteritems():
    for vma in pagemap.vma_internals.itervalues():
      for frame, count in vma.pageframes.iteritems():
        frame_counts[frame] += count
  return frame_counts
def count_statm(pids):
  """Sum resident/shared/private page counts from /proc/<pid>/statm."""
  total_resident, total_shared, total_private = 0, 0, 0
  for pid in pids:
    statm = procfs.ProcStatm.load(pid)
    if not statm:
      _LOGGER.warning('/proc/%d/statm not found.' % pid)
      continue
    total_resident += statm.resident
    total_shared += statm.share
    # Private pages are whatever part of the resident set is not shared.
    total_private += statm.resident - statm.share
  return (total_resident, total_shared, total_private)
def main(argv):
  # Route our warnings (and procfs's, on Linux) through a single stderr handler.
  logging_handler = logging.StreamHandler()
  logging_handler.setLevel(logging.WARNING)
  logging_handler.setFormatter(logging.Formatter(
      '%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
  _LOGGER.setLevel(logging.WARNING)
  _LOGGER.addHandler(logging_handler)
  if sys.platform.startswith('linux'):
    logging.getLogger('procfs').setLevel(logging.WARNING)
    logging.getLogger('procfs').addHandler(logging_handler)
    pids = list_pids(argv)
    pageframes = count_pageframes(pids)
  else:
    # /proc-based accounting only exists on Linux.
    _LOGGER.error('%s is not supported.' % sys.platform)
    return 1
  # TODO(dmikurube): Classify this total RSS.
  # Each distinct page frame is counted once, at 4096 bytes per page.
  print len(pageframes) * 4096
  return 0
if __name__ == '__main__':
  # Propagate main()'s status code to the shell.
  sys.exit(main(sys.argv))
| bsd-3-clause |
gennad/www_gennad_org | django/core/management/commands/inspectdb.py | 203 | 7614 | import keyword
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
    # `inspectdb` management command: reverse-engineers Django model code
    # from an existing database schema.
    help = "Introspects the database tables in the given database and outputs a Django model module."
    option_list = NoArgsCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to '
                'introspect. Defaults to using the "default" database.'),
    )
    requires_model_validation = False
    db_module = 'django.db'
    def handle_noargs(self, **options):
        # Stream the generated module line by line to stdout.
        try:
            for line in self.handle_inspection(options):
                self.stdout.write("%s\n" % line)
        except NotImplementedError:
            raise CommandError("Database inspection isn't supported for the currently selected database backend.")
    def handle_inspection(self, options):
        """Generator yielding the lines of the generated models module."""
        connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
        # e.g. 'auth_user-table name' -> 'AuthUserTableName'
        table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
        cursor = connection.cursor()
        yield "# This is an auto-generated Django model module."
        yield "# You'll have to do the following manually to clean this up:"
        yield "#     * Rearrange models' order"
        yield "#     * Make sure each model has one field with primary_key=True"
        yield "# Feel free to rename the models, but don't rename db_table values or field names."
        yield "#"
        yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
        yield "# into your database."
        yield ''
        yield 'from %s import models' % self.db_module
        yield ''
        for table_name in connection.introspection.get_table_list(cursor):
            yield 'class %s(models.Model):' % table2model(table_name)
            # Relations/indexes are optional backend features; degrade to
            # empty dicts when a backend doesn't implement them.
            try:
                relations = connection.introspection.get_relations(cursor, table_name)
            except NotImplementedError:
                relations = {}
            try:
                indexes = connection.introspection.get_indexes(cursor, table_name)
            except NotImplementedError:
                indexes = {}
            for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
                column_name = row[0]
                att_name = column_name.lower()
                comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
                extra_params = {}  # Holds Field parameters such as 'db_column'.
                # If the column name can't be used verbatim as a Python
                # attribute, set the "db_column" for this Field.
                if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
                    extra_params['db_column'] = column_name
                # Modify the field name to make it Python-compatible.
                if ' ' in att_name:
                    att_name = att_name.replace(' ', '_')
                    comment_notes.append('Field renamed to remove spaces.')
                if '-' in att_name:
                    att_name = att_name.replace('-', '_')
                    comment_notes.append('Field renamed to remove dashes.')
                if column_name != att_name:
                    comment_notes.append('Field name made lowercase.')
                if i in relations:
                    # Foreign key column: point at the related model (or
                    # 'self' for a self-referential relation).
                    rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
                    field_type = 'ForeignKey(%s' % rel_to
                    if att_name.endswith('_id'):
                        att_name = att_name[:-3]
                    else:
                        extra_params['db_column'] = column_name
                else:
                    # Calling `get_field_type` to get the field type string and any
                    # additional paramters and notes.
                    field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
                    extra_params.update(field_params)
                    comment_notes.extend(field_notes)
                # Add primary_key and unique, if necessary.
                if column_name in indexes:
                    if indexes[column_name]['primary_key']:
                        extra_params['primary_key'] = True
                    elif indexes[column_name]['unique']:
                        extra_params['unique'] = True
                field_type += '('
                if keyword.iskeyword(att_name):
                    att_name += '_field'
                    comment_notes.append('Field renamed because it was a Python reserved word.')
                # Don't output 'id = meta.AutoField(primary_key=True)', because
                # that's assumed if it doesn't exist.
                if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
                    continue
                # Add 'null' and 'blank', if the 'null_ok' flag was present in the
                # table description.
                if row[6]: # If it's NULL...
                    extra_params['blank'] = True
                    if not field_type in ('TextField(', 'CharField('):
                        extra_params['null'] = True
                field_desc = '%s = models.%s' % (att_name, field_type)
                if extra_params:
                    if not field_desc.endswith('('):
                        field_desc += ', '
                    field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
                field_desc += ')'
                if comment_notes:
                    field_desc += ' # ' + ' '.join(comment_notes)
                yield '    %s' % field_desc
            for meta_line in self.get_meta(table_name):
                yield meta_line
    def get_field_type(self, connection, table_name, row):
        """
        Given the database connection, the table name, and the cursor row
        description, this routine will return the given field type name, as
        well as any additional keyword parameters and notes for the field.
        """
        field_params = {}
        field_notes = []
        try:
            field_type = connection.introspection.get_field_type(row[1], row)
        except KeyError:
            # Unknown column type: fall back to TextField and flag the guess.
            field_type = 'TextField'
            field_notes.append('This field type is a guess.')
        # This is a hook for DATA_TYPES_REVERSE to return a tuple of
        # (field_type, field_params_dict).
        if type(field_type) is tuple:
            field_type, new_params = field_type
            field_params.update(new_params)
        # Add max_length for all CharFields.
        if field_type == 'CharField' and row[3]:
            field_params['max_length'] = row[3]
        if field_type == 'DecimalField':
            field_params['max_digits'] = row[4]
            field_params['decimal_places'] = row[5]
        return field_type, field_params, field_notes
    def get_meta(self, table_name):
        """
        Return a sequence comprising the lines of code necessary
        to construct the inner Meta class for the model corresponding
        to the given database table name.
        """
        return ['    class Meta:',
                '        db_table = %r' % table_name,
                '']
| bsd-3-clause |
OneBitSoftware/jwtSample | src/Spa/env1/Lib/site-packages/setuptools/command/install_scripts.py | 505 | 2231 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts"""
    def initialize_options(self):
        orig.install_scripts.initialize_options(self)
        # When set, skip generating entry-point wrapper scripts (used when
        # scripts must not be installed into an .egg file).
        self.no_ep = False
    def run(self):
        import setuptools.command.easy_install as ei
        self.run_command("egg_info")
        if self.distribution.scripts:
            orig.install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return
        # Build a Distribution describing the project being installed so the
        # script writer can resolve its entry points.
        ei_cmd = self.get_finalized_command("egg_info")
        dist = Distribution(
            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        exec_param = getattr(bs_cmd, 'executable', None)
        bw_cmd = self.get_finalized_command("bdist_wininst")
        is_wininst = getattr(bw_cmd, '_is_running', False)
        writer = ei.ScriptWriter
        if is_wininst:
            # Windows installers launch scripts via the plain interpreter name.
            exec_param = "python.exe"
            writer = ei.WindowsScriptWriter
        # resolve the writer to the environment
        writer = writer.best()
        cmd = writer.command_spec_class.best().from_param(exec_param)
        for args in writer.get_args(dist, cmd.as_header()):
            self.write_script(*args)
    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory"""
        from setuptools.command.easy_install import chmod, current_umask
        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)
        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            f = open(target, "w" + mode)
            f.write(contents)
            f.close()
            # Make the script executable, honoring the current umask.
            chmod(target, 0o777 - mask)
| mit |
RandyLowery/erpnext | erpnext/patches/v6_27/fix_recurring_order_status.py | 54 | 1936 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
	"""Repair recurring Sales/Purchase Orders created since 2016-01-25.

	Resets the cached delivered/received and billed quantities on the
	order items, then recomputes them from the linked stock documents
	(Delivery Note / Purchase Receipt) and invoices, and finally
	refreshes each order's status.
	"""
	for doc in (
		{
			"doctype": "Sales Order",
			"stock_doctype": "Delivery Note",
			"invoice_doctype": "Sales Invoice",
			"stock_doctype_ref_field": "against_sales_order",
			"invoice_ref_field": "sales_order",
			"qty_field": "delivered_qty"
		},
		{
			"doctype": "Purchase Order",
			"stock_doctype": "Purchase Receipt",
			"invoice_doctype": "Purchase Invoice",
			"stock_doctype_ref_field": "prevdoc_docname",
			"invoice_ref_field": "purchase_order",
			"qty_field": "received_qty"
		}):

		# Submitted recurring orders whose recurring_id points elsewhere.
		order_list = frappe.db.sql("""select name from `tab{0}`
			where docstatus=1 and is_recurring=1
			and ifnull(recurring_id, '') != name and creation >= '2016-01-25'"""
			.format(doc["doctype"]), as_dict=1)

		for order in order_list:
			# Zero the cached quantities before rebuilding them below.
			frappe.db.sql("""update `tab{0} Item`
				set {1}=0, billed_amt=0 where parent=%s""".format(doc["doctype"],
				doc["qty_field"]), order.name)

			# Check against Delivery Note and Purchase Receipt
			stock_doc_list = frappe.db.sql("""select distinct parent from `tab{0} Item`
				where docstatus=1 and ifnull({1}, '')=%s"""
				.format(doc["stock_doctype"], doc["stock_doctype_ref_field"]), order.name)

			if stock_doc_list:
				for dn in stock_doc_list:
					frappe.get_doc(doc["stock_doctype"], dn[0]).update_qty(update_modified=False)

			# Check against Invoice
			invoice_list = frappe.db.sql("""select distinct parent from `tab{0} Item`
				where docstatus=1 and ifnull({1}, '')=%s"""
				.format(doc["invoice_doctype"], doc["invoice_ref_field"]), order.name)

			if invoice_list:
				for dn in invoice_list:
					frappe.get_doc(doc["invoice_doctype"], dn[0]).update_qty(update_modified=False)

			# Recompute the order status from the refreshed quantities.
			frappe.get_doc(doc["doctype"], order.name).set_status(update=True, update_modified=False)
ramalho/eagle-py | examples/quadratic_function_graph.py | 2 | 4017 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from eagle import *
import math
# Tolerance used both to reject a (near-)zero leading coefficient and to
# detect a double root (see data_changed below).
epsilon = 0.000001
# Point size for every text label drawn on the canvas.
graph_font_size = 8
def clear(app, button):
    """Callback for the "clear" button: wipe everything drawn on the canvas."""
    canvas = app["graph"]
    canvas.clear()
def quadratic(a, b, c, x):
    """Evaluate the polynomial a*x**2 + b*x + c at the point x."""
    square_term = a * (x ** 2)
    linear_term = b * x
    return square_term + linear_term + c
def data_changed(app, *args):
    """Recompute the roots of a*x^2 + b*x + c and redraw the whole plot.

    Used both as the App's data-changed callback (fires when a spin value
    changes) and as the "Calculate" button callback.  Complex roots
    (delta < 0) are not supported and only print a message.
    """
    a = app["a"]
    b = app["b"]
    c = app["c"]
    graph = app["graph"]

    # A ~zero leading coefficient is not a second-degree polynomial.
    if abs(a) < epsilon:
        error("You must provide a Second Degree Polynom.")
        return

    delta = (b ** 2) - (4 * a * c)
    if delta < 0:
        print("Complex root not supported: delta (%s) < 0" % delta)
        return

    graph.clear()

    # Real roots via the standard quadratic formula.
    delta_sqrt = math.sqrt(delta)
    r0 = (-b - delta_sqrt) / (2 * a)
    r1 = (-b + delta_sqrt) / (2 * a)

    # f(x) = ax² + bx + c
    # f'(x) = 2ax + b
    # center_x: f'(center_x) == 0: 2a*center_x + b == 0; center_x = -b/2a
    # center_y: f(center_x) == a*center_x² + b*center_x + c
    #           a*(-b/2a)² + b*(-b/2a) + c
    #           b²/4a - b²/2a + c
    #           (b² - 2b² + 4ac)/4a
    #           (-b² + 4ac)/4a
    #           -(b² - 4ac)/4a = -delta/4a
    center_x = -b / (2 * a)
    center_y = -delta / (4 * a)

    extreme_y = quadratic(a, b, c, r0)

    # draw some area around roots
    d = abs(r1 - r0) / 4.0
    if d < epsilon:
        # both roots have the same value
        d = float(graph.width) / float(graph.height)

    if r0 > r1:
        r0, r1 = r1, r0

    # World-coordinate bounding box of the region to plot.
    x0 = r0 - d
    x1 = r1 + d
    if a > 0:
        y0 = center_y - d
        y1 = extreme_y + d
    else:
        y0 = extreme_y - d
        y1 = center_y + d

    # World units per canvas pixel.
    scale_x = (x1 - x0) / float(graph.width)
    scale_y = (y1 - y0) / float(graph.height)

    i2 = int((center_x - x0) / scale_x)
    j = int((extreme_y - y0) / scale_y)

    # If visible, draw y-axis
    if x0 <= 0 <= x1:
        i = int(-x0 / scale_x)
        graph.draw_line(i, 0, i, graph.height, color="#666666", size=1)
    else:
        # Axis off-screen: clamp the marker to the nearest canvas edge.
        if x0 > 0:
            i = graph.width
        else:
            i = 0

    # Horizontal guide from the axis to the vertex, labeled with center_y.
    graph.draw_line(i, j, i2, j, color="#999999")
    txt = str(center_y)
    j += 2
    graph.draw_text(txt, i, j, fgcolor="red", font_size=graph_font_size)

    # If visible, draw x-axis (should always be)
    if y0 <= 0 <= y1:
        j = graph.height - int(-y0 / scale_y)
        graph.draw_line(0, j, graph.width, j, color="#666666", size=1)
        j += 2

        # Tick and label at each root.
        i = int((r0 - x0) / scale_x)
        graph.draw_line(i, j - 12, i, j + 10, color="#999999")
        txt = str(r0)
        graph.draw_text(txt, i, j, fgcolor="red", font_size=graph_font_size)

        i = int((r1 - x0) / scale_x)
        graph.draw_line(i, j - 12, i, j + 10, color="#999999")
        txt = str(r1)
        graph.draw_text(txt, i, j, fgcolor="red", font_size=graph_font_size)

    # Sample points, we'll draw as connected lines
    points = []
    x = x0
    for i in xrange(graph.width):
        y = quadratic(a, b, c, x)
        j = graph.height - int((y - y0) / scale_y)
        points.append((i, j))
        x += scale_x
    graph.draw_lines(points, color="red")

    # Draw text with expression and roots
    if a < 0:
        y = 0
    else:
        y = graph.height - 30
    txt = ("f(x) = %0.3e * x² + %0.3e * x + %0.3e\n"
           "roots: {%s, %s}") % (a, b, c, r0, r1)
    graph.draw_text(txt, 2, y, fgcolor="red", bgcolor="white",
                    font_size=graph_font_size)
# Declarative UI: coefficient spin buttons on top, the plot canvas in the
# center and the action buttons at the bottom.  Editing any spin value
# triggers data_changed() via data_changed_callback.
App(title="Second Degree Polynom Graph and Roots",
    data_changed_callback=data_changed,
    window_size=(800, 600),
    top=(Spin(id="a", label="x² * ", value=1.0, digits=5),
         Spin(id="b", label="+ x * ", value=-1.0),
         Spin(id="c", label="+ ", value=-2),
         ),
    center=Canvas(id="graph",
                  width=700,
                  height=400,
                  label=None,
                  bgcolor="white",
                  scrollbars=False),
    bottom=(Button(id="calc", label="Calculate", callback=data_changed),
            Button(id="clear", stock="clear", callback=clear),
            ),
    )

# Enter the eagle main loop.
run()
| lgpl-2.1 |
jiwanlimbu/aura | keystone/auth/schema.py | 2 | 3011 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schema for the body of a token-issue (authentication) request.
# The caller authenticates via the "password" method (user credentials,
# optionally domain-qualified) or via an existing "token", and may
# optionally scope the new token to a project or a domain.
token_issue = {
    "type": "object",
    "properties": {
        "identity": {
            "type": "object",
            "properties": {
                "methods": {
                    "type": "array",
                    "items": {"type": "string"},
                },
                "password": {
                    "type": "object",
                    "properties": {
                        "user": {
                            "type": "object",
                            "properties": {
                                "id": {"type": "string"},
                                "name": {"type": "string"},
                                "password": {"type": "string"},
                                "domain": {
                                    "type": "object",
                                    "properties": {
                                        "id": {"type": "string"},
                                        "name": {"type": "string"},
                                    },
                                },
                            },
                        },
                    },
                },
                "token": {
                    "type": "object",
                    "properties": {
                        "id": {"type": "string"},
                    },
                    "required": ["id"],
                },
            },
            "required": ["methods"],
        },
        "scope": {
            # "scope" may also be a plain string.
            "type": ["object", "string"],
            "properties": {
                "project": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "id": {"type": "string"},
                        "domain": {
                            "type": "object",
                            "properties": {
                                "id": {"type": "string"},
                                "name": {"type": "string"},
                            },
                        },
                    },
                },
                "domain": {
                    "type": "object",
                    "properties": {
                        "id": {"type": "string"},
                        "name": {"type": "string"},
                    },
                },
            },
        },
    },
    "required": ["identity"],
}
| apache-2.0 |
sander76/home-assistant | homeassistant/components/ssdp/__init__.py | 4 | 6916 | """The SSDP integration."""
import asyncio
from datetime import timedelta
import logging
from typing import Any, Mapping
import aiohttp
from async_upnp_client.search import async_search
from defusedxml import ElementTree
from netdisco import ssdp, util
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.loader import async_get_ssdp
DOMAIN = "ssdp"

# How often a new SSDP search is broadcast after startup.
SCAN_INTERVAL = timedelta(seconds=60)

# Attributes for accessing info from SSDP response
ATTR_SSDP_LOCATION = "ssdp_location"
ATTR_SSDP_ST = "ssdp_st"
ATTR_SSDP_USN = "ssdp_usn"
ATTR_SSDP_EXT = "ssdp_ext"
ATTR_SSDP_SERVER = "ssdp_server"
# Attributes for accessing info from retrieved UPnP device description
ATTR_UPNP_DEVICE_TYPE = "deviceType"
ATTR_UPNP_FRIENDLY_NAME = "friendlyName"
ATTR_UPNP_MANUFACTURER = "manufacturer"
ATTR_UPNP_MANUFACTURER_URL = "manufacturerURL"
ATTR_UPNP_MODEL_DESCRIPTION = "modelDescription"
ATTR_UPNP_MODEL_NAME = "modelName"
ATTR_UPNP_MODEL_NUMBER = "modelNumber"
ATTR_UPNP_MODEL_URL = "modelURL"
ATTR_UPNP_SERIAL = "serialNumber"
ATTR_UPNP_UDN = "UDN"
ATTR_UPNP_UPC = "UPC"
ATTR_UPNP_PRESENTATION_URL = "presentationURL"

_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """Set up the SSDP integration."""

    async def _start_scanner(_):
        # Defer scanner creation until Home Assistant has fully started.
        matchers = await async_get_ssdp(hass)
        scanner = Scanner(hass, matchers)
        await scanner.async_scan(None)
        async_track_time_interval(hass, scanner.async_scan, SCAN_INTERVAL)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, _start_scanner)

    return True
class Scanner:
    """Class to manage SSDP scanning."""

    def __init__(self, hass, integration_matchers):
        """Initialize class."""
        self.hass = hass
        # (st, location) pairs that have already been dispatched to flows.
        self.seen = set()
        # Raw UPNPEntry objects collected during the current scan.
        self._entries = []
        # Mapping of integration domain -> list of matcher dicts.
        self._integration_matchers = integration_matchers
        # description URL -> parsed device description (reset every scan).
        self._description_cache = {}

    async def _on_ssdp_response(self, data: Mapping[str, Any]) -> None:
        """Process an ssdp response."""
        # Header names are normalized to lowercase before storing.
        self.async_store_entry(
            ssdp.UPNPEntry({key.lower(): item for key, item in data.items()})
        )

    @callback
    def async_store_entry(self, entry):
        """Save an entry for later processing."""
        self._entries.append(entry)

    async def async_scan(self, _):
        """Scan for new entries."""
        await async_search(async_callback=self._on_ssdp_response)
        await self._process_entries()

        # We clear the cache after each run. We track discovered entries
        # so will never need a description twice.
        self._description_cache.clear()
        self._entries.clear()

    async def _process_entries(self):
        """Process SSDP entries."""
        entries_to_process = []
        unseen_locations = set()

        for entry in self._entries:
            key = (entry.st, entry.location)

            # Skip entries already dispatched on a previous scan.
            if key in self.seen:
                continue

            self.seen.add(key)

            entries_to_process.append(entry)

            if (
                entry.location is not None
                and entry.location not in self._description_cache
            ):
                unseen_locations.add(entry.location)

        if not entries_to_process:
            return

        if unseen_locations:
            await self._fetch_descriptions(list(unseen_locations))

        tasks = []

        for entry in entries_to_process:
            info, domains = self._process_entry(entry)
            for domain in domains:
                _LOGGER.debug("Discovered %s at %s", domain, entry.location)
                # Start a discovery config flow for every matched integration.
                tasks.append(
                    self.hass.config_entries.flow.async_init(
                        domain, context={"source": DOMAIN}, data=info
                    )
                )

        if tasks:
            await asyncio.gather(*tasks)

    async def _fetch_descriptions(self, locations):
        """Fetch descriptions from locations."""
        # Fetch all descriptions concurrently; failures only skip that URL.
        for idx, result in enumerate(
            await asyncio.gather(
                *[self._fetch_description(location) for location in locations],
                return_exceptions=True,
            )
        ):
            location = locations[idx]

            if isinstance(result, Exception):
                _LOGGER.exception(
                    "Failed to fetch ssdp data from: %s", location, exc_info=result
                )
                continue

            self._description_cache[location] = result

    def _process_entry(self, entry):
        """Process a single entry."""
        info = {"st": entry.st}
        for key in "usn", "ext", "server":
            if key in entry.values:
                info[key] = entry.values[key]

        if entry.location:
            # Multiple entries usually share same location. Make sure
            # we fetch it only once.
            info_req = self._description_cache.get(entry.location)
            if info_req is None:
                return (None, [])

            info.update(info_req)

        domains = set()
        for domain, matchers in self._integration_matchers.items():
            for matcher in matchers:
                # Every key/value pair of a matcher must be present in info.
                if all(info.get(k) == v for (k, v) in matcher.items()):
                    domains.add(domain)

        if domains:
            return (info_from_entry(entry, info), domains)

        return (None, [])

    async def _fetch_description(self, xml_location):
        """Fetch an XML description."""
        session = self.hass.helpers.aiohttp_client.async_get_clientsession()
        try:
            resp = await session.get(xml_location, timeout=5)
            xml = await resp.text(errors="replace")

            # Samsung Smart TV sometimes returns an empty document the
            # first time. Retry once.
            if not xml:
                resp = await session.get(xml_location, timeout=5)
                xml = await resp.text(errors="replace")
        except (aiohttp.ClientError, asyncio.TimeoutError) as err:
            _LOGGER.debug("Error fetching %s: %s", xml_location, err)
            return {}

        try:
            tree = ElementTree.fromstring(xml)
        except ElementTree.ParseError as err:
            _LOGGER.debug("Error parsing %s: %s", xml_location, err)
            return {}

        # Return only the <device> section of the description, if present.
        return util.etree_to_dict(tree).get("root", {}).get("device", {})
def info_from_entry(entry, device_info):
    """Merge an SSDP entry and its device description into a single dict."""
    combined = {
        ATTR_SSDP_LOCATION: entry.location,
        ATTR_SSDP_ST: entry.st,
    }
    if not device_info:
        return combined

    combined.update(device_info)
    # The search target is already exposed as ATTR_SSDP_ST; drop the raw key.
    combined.pop("st", None)
    # Rename raw SSDP header keys to their namespaced attribute names.
    for raw_key, attr_key in (
        ("usn", ATTR_SSDP_USN),
        ("ext", ATTR_SSDP_EXT),
        ("server", ATTR_SSDP_SERVER),
    ):
        if raw_key in combined:
            combined[attr_key] = combined.pop(raw_key)
    return combined
| apache-2.0 |
cpatrickalves/simprev | modelos/modulos_fazenda/depesas.py | 1 | 20278 | # -*- coding: utf-8 -*-
"""
@author: Patrick Alves
"""
from util.tabelas import LerTabelas
import pandas as pd
# Computes benefit expenses.
# Based on the equations of the 2018 LDO (budget guidelines law) and on
# spreadsheets from the Ministry of Finance (MF).
def calc_despesas(despesas, estoques, concessoes, valCoBen, salarios, valMedBenef, probabilidades, nparcelas, resultados, parametros):
    """Project the expense of every benefit per year (and, for the urban
    above-the-floor clientele, per age), updating ``despesas`` in place
    and storing the aggregated totals in ``resultados``.

    Returns ``resultados``.
    """

    periodo = parametros['periodo']

    # Object used to access the helpers of the LerTabelas class
    dados = LerTabelas()

    ult_ano_estoq = periodo[0]-1 # 2014, last year with known stock data

    ##### Compute the expense with the known data (2011-2014)
    # The value in the database is monthly
    for beneficio in despesas.keys():

        # Sickness benefit for those earning above the floor
        if 'AuxdUrbAcim' in beneficio:
            despesas[beneficio] = valCoBen[beneficio] * nparcelas[beneficio]

        # Retirements and pensions for those earning above the floor
        elif 'Acim' in beneficio:
            desp_dez = despesas[beneficio] # expenses of the month of December
            despesas[beneficio] = desp_dez * nparcelas[beneficio]

        # Remaining aid benefits
        elif 'Aux' in beneficio:
            qtd_benef = 0
            if 'Auxd' in beneficio:
                qtd_benef = concessoes[beneficio][ult_ano_estoq]
            else:
                qtd_benef = estoques[beneficio][ult_ano_estoq]

            # NOTE: for the accident aid (Auxa) the rule is 50% of the
            # benefit salary ("Salário de Benefício")
            # source: http://www.previdencia.gov.br/servicos-ao-cidadao/informacoes-gerais/valor-beneficios-incapacidade/
            if 'Auxa' in beneficio:
                valor_benef = salarios['salarioMinimo'][ult_ano_estoq] * 0.5
            else:
                valor_benef = salarios['salarioMinimo'][ult_ano_estoq]

            npar = nparcelas[beneficio][ult_ano_estoq]
            # Compute the expense for each benefit
            despesas[beneficio][ult_ano_estoq] = qtd_benef * valor_benef * npar

        # Remaining types
        else:
            estoq_total = estoques[beneficio][ult_ano_estoq]
            estoq_total_ano_ant = estoques[beneficio][ult_ano_estoq-1]
            valor_benef = salarios['salarioMinimo'][ult_ano_estoq]
            npar = nparcelas[beneficio][ult_ano_estoq]
            estoq_medio = ((estoq_total + estoq_total_ano_ant)/2)
            # Compute the expense for each benefit (Eq. 44)
            despesas[beneficio][ult_ano_estoq] = estoq_medio * valor_benef * npar

    ##### Compute expenses for the Rural, Urban and assistance clienteles
    ##### that receive the floor (1 minimum wage) #####
    for clientela in ['Rur', 'Piso', 'Rmv', 'Loas']:

        beneficios = dados.get_id_beneficios(clientela)

        for beneficio in beneficios:
            # Skip SalMat: its calculation is different and done later
            if 'SalMat' in beneficio:
                continue

            # Check that a stock exists for the benefit
            if beneficio in estoques:
                for ano in periodo:
                    # check that a projection exists for this year
                    if ano in estoques[beneficio].columns:

                        # Calculation for aid benefits
                        if 'Aux' in beneficio:
                            qtd_benef = estoques[beneficio][ano]
                            valor_benef = salarios['salarioMinimo'][ano]
                            # NOTE: for the accident aid (Auxa) the rule is
                            # 50% of the benefit salary
                            # source: http://www.previdencia.gov.br/servicos-ao-cidadao/informacoes-gerais/valor-beneficios-incapacidade/
                            if 'Auxa' in beneficio:
                                valor_benef = salarios['salarioMinimo'][ano] * 0.5

                            npar = nparcelas[beneficio][ano]
                            # Compute the expense for each benefit
                            despesas[beneficio][ano] = qtd_benef * valor_benef * npar

                        # Calculation for the remaining types
                        else:
                            # Get the stocks of this year and the previous one
                            estoq_total = estoques[beneficio][ano]
                            estoq_total_ano_ant = estoques[beneficio][ano-1]
                            valor_benef = salarios['salarioMinimo'][ano]
                            npar = nparcelas[beneficio][ano]
                            # Compute the expense for each benefit (Eq. 44)
                            despesas[beneficio][ano] = ((estoq_total + estoq_total_ano_ant)/2) * valor_benef * npar

    ##### Compute expenses for the urban clientele earning above the floor #####
    for beneficio in dados.get_id_beneficios('Acim'):

        # Skip SalMat: its calculation is different and done later
        if 'SalMat' in beneficio:
            continue

        # Check that stocks exist
        if beneficio in estoques:

            sexo = beneficio[-1]
            # If the benefit is a retirement by contribution time
            # (normal, teacher or special) - Eq. 49 and 50
            #if ('tcn' in beneficio or 'tce' in beneficio or 'tcp' in beneficio):
                #fator_prev = 1
                #ajuste = 1
                #val_med_novos_ben = fator_prev * ajuste * salarios['SalMedSegUrbAcimPnad'+sexo]

            for ano in periodo:
                if ano in estoques[beneficio].columns: # check that a projection exists for this year

                    # Expense calculation for the aid benefits
                    if 'Aux' in beneficio:
                        est_ano = estoques[beneficio][ano]
                        vmb = valMedBenef[beneficio][ano]
                        npar = nparcelas[beneficio][ano]
                        # Eq. 46
                        despesas[beneficio][ano] = est_ano * vmb * npar

                    else:
                        # Calculation for retirements and pensions
                        val_med_novos_ben = valMedBenef[beneficio]

                        # Ages 1 to 90
                        for idade in range(1,91):

                            # Age 90 also absorbs last year's age-90 expense
                            # (open-ended age group)
                            if idade == 90:
                                desp_anterior = despesas[beneficio][ano-1][idade-1] + despesas[beneficio][ano-1][idade]
                            else:
                                desp_anterior = despesas[beneficio][ano-1][idade-1]

                            conc_anterior = concessoes[beneficio][ano-1][idade-1]
                            # NOTE: arguably the insured population should be
                            # used here instead of PopOcup
                            # NOTE: rend_med_ocup_ant already includes the
                            # replacement rate of Eq. 45
                            valor_med_conc_ant = val_med_novos_ben[ano-1][idade-1]
                            npar = nparcelas[beneficio][ano]
                            npar_ant = nparcelas[beneficio][ano-1]
                            prob_morte = probabilidades['Mort'+sexo][ano][idade]
                            fam = probabilidades['fam'+beneficio][ano][idade]
                            # The spreadsheets call this "Atualização
                            # Monetária" (monetary restatement)
                            reajuste = parametros['tx_reajuste_beneficios'][ano]
                            novas_conc = concessoes[beneficio][ano][idade]
                            valor_med_conc = val_med_novos_ben[ano][idade]

                            # Eq. 45
                            part1 = desp_anterior + conc_anterior * valor_med_conc_ant * (npar_ant/2)
                            part2 = (1 - prob_morte * fam) * (1 + reajuste/100)
                            part3 = (novas_conc * valor_med_conc * (npar/2))
                            despesas[beneficio].loc[idade, ano] = part1 * part2 + part3

                        # Age zero
                        novas_conc = concessoes[beneficio][ano][0]
                        valor_med_conc = val_med_novos_ben[ano][0]
                        npar = nparcelas[beneficio][ano]
                        despesas[beneficio].loc[0, ano] = novas_conc * valor_med_conc * (npar/2)

    ##### Compute expenses for the maternity benefit (Salário Maternidade) #####
    for beneficio in dados.get_id_beneficios('SalMat'):

        # 2014-2060
        anos = [periodo[0]-1] + periodo

        # Check that a stock exists for the benefit
        if beneficio in estoques:
            # Series object holding the accumulated expenses per year
            desp_acumulada = pd.Series(0.0, index=anos)

            # Get the accumulated stocks of the current year
            for ano in anos:
                estoq_total = estoques[beneficio][ano]

                # if the clientele is UrbAcim
                if 'Acim' in beneficio:
                    valor_benef = valMedBenef['SalMatUrbAcimM'][ano]
                else:
                    valor_benef = salarios['salarioMinimo'][ano]

                npar = nparcelas[beneficio][ano]
                # NOTE: the LDO does not describe the expense equation for SalMat
                desp_acumulada[ano] = estoq_total * valor_benef * npar

            # Store in the DataFrame
            despesas[beneficio] = desp_acumulada

    ##### Compute the total expense #####
    anos = [periodo[0]-1] + periodo  #2014-2060
    desp_total = pd.Series(0.0, index=anos)  # Series holding the total expense
    desp_total_urb = pd.Series(0.0, index=anos)  # Total urban expense
    desp_total_rur = pd.Series(0.0, index=anos)  # Total rural expense

    for ano in anos:
        for beneficio in despesas.keys():
            # The object that stores the maternity-benefit expenses differs
            if 'SalMat' in beneficio:
                if ano in despesas[beneficio].index: # check that a projection exists for this year
                    if 'Urb' in beneficio: # Split urban and rural expenses
                        desp_total_urb[ano] += despesas[beneficio][ano]
                    else:
                        desp_total_rur[ano] += despesas[beneficio][ano]
            else:
                # Calculation for the remaining benefits
                if ano in despesas[beneficio].columns: # check that a projection exists for this year
                    if 'Urb' in beneficio: # Split urban and rural expenses
                        desp_total_urb[ano] += despesas[beneficio][ano].sum()
                    else:
                        desp_total_rur[ano] += despesas[beneficio][ano].sum()

    desp_total = desp_total_urb + desp_total_rur

    # Compute the expense growth rate
    tx_cres_desp = pd.Series(0.0, index=periodo)
    for ano in periodo: # skip the first year (no previous value)
        tx_cres_desp[ano] = desp_total[ano]/desp_total[ano-1] - 1

    resultados['despesas'] = desp_total
    resultados['despesas_urb'] = desp_total_urb
    resultados['despesas_rur'] = desp_total_rur
    resultados['tx_cres_despesa'] = tx_cres_desp

    return resultados
# Computes the average number of benefit payments (installments) per year.
# Section 4.6 of the LDO (p. 43) describes a calculation based on total
# values and expenses per benefit, but since only December data is
# available the factors below were fixed manually.
# NOTE: values taken from the Ministry of Finance spreadsheets.
def calc_n_parcelas(estoques, despesa, valMedBenef, periodo):
    """Return a dict mapping each benefit id to a pd.Series, indexed by
    year (2014-2060), with the average number of yearly installments."""

    dados = LerTabelas()

    # 2014
    ano_estoque = periodo[0] - 1
    # 2014-2060
    anos = [ano_estoque] + periodo

    # For every benefit prefix: (base value kept for 2014,
    #                            override for 2015,
    #                            override from 2016 onwards).
    # A None override keeps the base value for those years.  Prefixes whose
    # rural and urban clienteles differ map 'Rur'/'Urb' to separate triples.
    fatores = {
        'Apin': {'Rur': (13.0, 12.95, 12.82), 'Urb': (13.0, 12.7, 12.95)},
        'Atcn': {'Rur': (13.0, 12.92, 12.92), 'Urb': (13.0, 11.7, 12.0)},
        'Apid': (13.0, None, None),
        'Atcp': {'Rur': (13.0, None, None), 'Urb': (13.0, 13.46, 14.5)},
        'Ainv': {'Rur': (13.0, 13.09, 12.96), 'Urb': (13.0, 12.3, 11.9)},
        'Atce': {'Rur': (13.0, None, None), 'Urb': (13.0, 12.5, 13.6)},
        'Atcd': (13.0, None, None),
        'Pe': {'Rur': (13.0, 12.97, 12.89), 'Urb': (13.0, 12.70, 13.10)},
        'Auxd': {'Rur': (12.0, 11.83, 13.32), 'Urb': (12.0, 8.33, 9.01)},
        'Auxa': {'Rur': (12.0, 12.99, 13.46), 'Urb': (12.0, 12.43, 12.56)},
        'Auxr': {'Rur': (12.0, 12.06, 12.18), 'Urb': (12.0, 12.31, 14.03)},
        'SalMat': (4.0, None, None),
        'LoasDef': (12.0, 12.05, 12.00),
        'LoasIdo': (12.0, 11.96, 11.73),
        'Rmv': (12.0, 12.09, 12.06),
    }

    # Dict holding the average number of installments per benefit type
    n_parcelas = {}

    for prefixo, regra in fatores.items():
        for benef in dados.get_id_beneficios(prefixo):
            if isinstance(regra, dict):
                clientela = 'Rur' if 'Rur' in benef else 'Urb'
                base, val_2015, val_2016_diante = regra[clientela]
            else:
                base, val_2015, val_2016_diante = regra

            serie = pd.Series(base, index=anos)
            if val_2015 is not None:
                serie.loc[periodo[0]] = val_2015            # 2015
            if val_2016_diante is not None:
                serie.loc[periodo[1]:] = val_2016_diante    # 2016-2060
            n_parcelas[benef] = serie

    return n_parcelas
| gpl-3.0 |
openstack/horizon | openstack_dashboard/test/unit/api/test_network.py | 1 | 6484 | # Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from unittest import mock
import netaddr
from django.test.utils import override_settings
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class NetworkApiNeutronTests(test.APIMockTestCase):
    """Tests for api.network helpers backed by a mocked neutron client."""

    def setUp(self):
        super().setUp()
        # Patch the neutron client factory so no real API calls are made.
        neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
        self.qclient = neutronclient.return_value

    def _get_expected_addresses(self, server, no_fip_expected=True):
        """Build the addresses dict the given server is expected to expose.

        Collects the fixed IPs of every port attached to *server*, grouped
        by network name; unless ``no_fip_expected``, the floating IP
        associated with each port is appended as well.
        """
        server_ports = self.ports.filter(device_id=server.id)
        addresses = collections.defaultdict(list)
        for p in server_ports:
            net_name = self.networks.get(id=p['network_id']).name
            for ip in p.fixed_ips:
                version = netaddr.IPAddress(ip['ip_address']).version
                addresses[net_name].append(
                    {'version': version,
                     'addr': ip['ip_address'],
                     'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
                     'OS-EXT-IPS:type': 'fixed'})
            if no_fip_expected:
                continue
            fips = self.floating_ips.filter(port_id=p['id'])
            if not fips:
                continue
            # Only one FIP should match.
            fip = fips[0]
            addresses[net_name].append(
                {'version': 4,
                 'addr': fip.floating_ip_address,
                 'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
                 'OS-EXT-IPS:type': 'floating'})
        return addresses

    def _check_server_address(self, res_server_data, no_fip_expected=False):
        """Assert the server's addresses equal the computed expectation."""
        expected_addresses = self._get_expected_addresses(res_server_data,
                                                          no_fip_expected)
        self.assertEqual(len(expected_addresses),
                         len(res_server_data.addresses))
        for net, addresses in expected_addresses.items():
            self.assertIn(net, res_server_data.addresses)
            self.assertEqual(addresses, res_server_data.addresses[net])

    def _test_servers_update_addresses(self, router_enabled=True):
        """Common body for the router-enabled/disabled test cases."""
        tenant_id = self.request.user.tenant_id

        servers = self.servers.list()
        server_ids = tuple([server.id for server in servers])
        server_ports = [p for p in self.api_ports.list()
                        if p['device_id'] in server_ids]
        server_port_ids = tuple([p['id'] for p in server_ports])
        if router_enabled:
            assoc_fips = [fip for fip in self.api_floating_ips.list()
                          if fip['port_id'] in server_port_ids]
        server_network_ids = [p['network_id'] for p in server_ports]
        server_networks = [net for net in self.api_networks.list()
                           if net['id'] in server_network_ids]

        # Wire the mocked client to return the fixture data above.
        list_ports_retvals = [{'ports': server_ports}]
        self.qclient.list_ports.side_effect = list_ports_retvals
        if router_enabled:
            self.qclient.list_floatingips.return_value = {'floatingips':
                                                          assoc_fips}
            list_ports_retvals.append({'ports': self.api_ports.list()})
        self.qclient.list_networks.return_value = {'networks': server_networks}
        self.qclient.list_subnets.return_value = {'subnets':
                                                  self.api_subnets.list()}

        api.network.servers_update_addresses(self.request, servers)

        # The server list itself must be unchanged.
        self.assertEqual(self.servers.count(), len(servers))
        self.assertEqual([server.id for server in self.servers.list()],
                         [server.id for server in servers])

        no_fip_expected = not router_enabled

        # server[0] has one fixed IP and one floating IP
        # if router ext is enabled.
        self._check_server_address(servers[0], no_fip_expected)
        # The expected is also calculated, we examine the result manually once.
        addrs = servers[0].addresses['net1']
        if router_enabled:
            self.assertEqual(3, len(addrs))
            self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
            self.assertEqual('fixed', addrs[1]['OS-EXT-IPS:type'])
            self.assertEqual('floating', addrs[2]['OS-EXT-IPS:type'])
        else:
            self.assertEqual(2, len(addrs))
            self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
            self.assertEqual('fixed', addrs[1]['OS-EXT-IPS:type'])

        # server[1] has one fixed IP.
        self._check_server_address(servers[1], no_fip_expected)
        # manual check.
        addrs = servers[1].addresses['net2']
        self.assertEqual(1, len(addrs))
        self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])

        # server[2] has no corresponding ports in neutron_data,
        # so it should be an empty dict.
        self.assertFalse(servers[2].addresses)

        # Verify the exact neutron API calls that were issued.
        expected_list_ports = [mock.call(device_id=server_ids)]
        if router_enabled:
            self.qclient.list_floatingips.assert_called_once_with(
                tenant_id=tenant_id, port_id=server_port_ids)
            expected_list_ports.append(mock.call(tenant_id=tenant_id))
        else:
            self.assertEqual(0, self.qclient.list_floatingips.call_count)
        self.qclient.list_ports.assert_has_calls(expected_list_ports)
        self.qclient.list_networks.assert_called_once_with(
            id=frozenset(server_network_ids))
        self.qclient.list_subnets.assert_called_once_with()

    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
    def test_servers_update_addresses(self):
        """Addresses include floating IPs when the router ext is enabled."""
        self._test_servers_update_addresses()

    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
    def test_servers_update_addresses_router_disabled(self):
        """Only fixed IPs are reported when the router ext is disabled."""
        self._test_servers_update_addresses(router_enabled=False)
| apache-2.0 |
virneo/nupic | tests/unit/nupic/algorithms/cla_classifier_diff_test.py | 35 | 1634 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the "diff" version of the CLA classifier."""
import unittest2 as unittest
from nupic.algorithms.cla_classifier_diff import CLAClassifierDiff
import cla_classifier_test
class CLAClassifierDiffTest(cla_classifier_test.CLAClassifierTest):
  """CLAClassifierDiff unit tests.

  Reuses the entire CLAClassifierTest suite, swapping in the "diff"
  classifier implementation during setUp.
  """

  def setUp(self):
    # The parent suite instantiates `self._classifier`, so pointing it at
    # CLAClassifierDiff runs every inherited test against the diff version.
    self._classifier = CLAClassifierDiff

  # BUG FIX: the original called unittest.skip(...) as a bare expression,
  # which built a skip decorator and immediately discarded it -- the known
  # failing test was never actually skipped.  Apply it as a decorator.
  @unittest.skip("The classifier diff fails for this test for some reason. "
                 "Should be fixed but the diff classifier is just for testing "
                 "anyway.")
  def testComputeCategory2(self):
    pass
if __name__ == '__main__':
  # Allow running this test module directly from the command line.
  unittest.main()
| agpl-3.0 |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/ior_benchmark.py | 1 | 4221 | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs IOR and mdtest benchmarks.
IOR is a tool used for distributed testing of filesystem performance.
mdtest is used for distributed testing of filesystem metadata performance.
See https://github.com/hpc/ior for more info.
"""
import itertools
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import disk
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ior
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'ior_num_procs', 256,
'The number of MPI processes to use for IOR.')
flags.DEFINE_string(
'ior_script', 'default_ior_script',
'The IOR script to run. See '
'https://github.com/hpc/ior/blob/master/doc/sphinx/userDoc/skripts.rst '
'for more info.')
flags.DEFINE_integer(
'mdtest_num_procs', 32,
'The number of MPI processes to use for mdtest.')
flags.DEFINE_list(
'mdtest_args', ['-n 1000 -u'],
'Command line arguments to be passed to mdtest. '
'Each set of args in the list will be run separately.')
flags.DEFINE_boolean(
'mdtest_drop_caches', True,
'Whether to drop caches between the create/stat/delete phases. '
'If this is set, mdtest will be run 3 times with the -C, -T, and -r '
'options and the client page caches will be dropped between runs. '
'When False, a Full Sweep (Create, Stat, Delete) is run.')
BENCHMARK_NAME = 'ior'
BENCHMARK_CONFIG = """
ior:
description: Runs IOR and mdtest benchmarks.
flags:
data_disk_type: nfs
data_disk_size: 2048
vm_groups:
default:
vm_spec: *default_dual_core
disk_spec: *default_500_gb
vm_count: null
"""
def GetConfig(user_config):
  """Merge the user's overrides on top of this benchmark's default config.

  Args:
    user_config: dict of user-supplied configuration overrides.

  Returns:
    The merged benchmark configuration.
  """
  merged_config = configs.LoadConfig(
      BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return merged_config
def Prepare(benchmark_spec):
  """Install IOR on the vms and set them up for MPI execution.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  def _InstallIor(vm):
    # Every VM needs the ior package available locally.
    vm.Install('ior')

  def _Authenticate(vm):
    # Passwordless SSH between VMs is required for MPI to spawn ranks.
    vm.AuthenticateVm()

  vms = benchmark_spec.vms
  vm_util.RunThreaded(_InstallIor, vms)
  vm_util.RunThreaded(_Authenticate, vms)
  hpc_util.CreateMachineFile(vms)
def Run(benchmark_spec):
  """Run the IOR benchmark on the vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  # All MPI jobs are launched from the first VM; the machine file created in
  # Prepare() lets mpirun fan out to the others.
  master_vm = benchmark_spec.vms[0]
  results = []

  # Run IOR benchmark.
  if FLAGS.ior_num_procs and FLAGS.ior_script:
    # The script must live on the shared scratch mount so all ranks see it.
    remote_script_path = posixpath.join(master_vm.scratch_disks[0].mount_point,
                                        FLAGS.ior_script)
    master_vm.PushDataFile(
        FLAGS.ior_script,
        remote_script_path,
        # SCP directly to SMB returns an error, so first copy to disk.
        should_double_copy=(FLAGS.data_disk_type == disk.SMB))
    results += ior.RunIOR(master_vm, FLAGS.ior_num_procs, remote_script_path)

  # Run mdtest benchmark.  With cache-dropping enabled, each create/stat/
  # delete phase (-C/-T/-r) is run as a separate mdtest invocation.
  phase_args = ('-C', '-T', '-r') if FLAGS.mdtest_drop_caches else ('-C -T -r',)
  # Cross every user-supplied arg set with every phase.
  mdtest_args = (' '.join(args) for args in
                 itertools.product(FLAGS.mdtest_args, phase_args))
  for args in mdtest_args:
    results += ior.RunMdtest(master_vm, FLAGS.mdtest_num_procs, args)
    if FLAGS.mdtest_drop_caches:
      # Drop client page caches between phases so later phases measure the
      # filesystem, not the local cache.
      vm_util.RunThreaded(lambda vm: vm.DropCaches(), benchmark_spec.vms)
  return results
def Cleanup(unused_benchmark_spec):
  """Cleanup the IOR benchmark.

  Nothing needs to be torn down: the scratch disks and VMs are reclaimed by
  the framework itself.

  Args:
    unused_benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  # Intentionally a no-op.
  return None
| apache-2.0 |
tuhangdi/django | tests/utils_tests/test_inspect.py | 157 | 1161 | import unittest
from django.utils import inspect
class Person(object):
    """Fixture whose method *signatures* (not behavior) are inspected by the
    tests below -- parameter names and defaults are asserted verbatim, so
    they must not be changed."""

    def no_arguments(self):
        # No parameters besides `self`.
        return None

    def one_argument(self, something):
        # A single positional parameter.
        return something

    def just_args(self, *args):
        # Var-positional only.
        return args

    def all_kinds(self, name, address='home', age=25, *args, **kwargs):
        # Positional, defaults, *args and **kwargs all together.
        return kwargs
class TestInspectMethods(unittest.TestCase):
    """Exercises django.utils.inspect helpers against the Person fixture."""

    def test_get_func_full_args_no_arguments(self):
        # `self` is excluded, so a no-arg method reports an empty list.
        self.assertEqual(inspect.get_func_full_args(Person.no_arguments), [])

    def test_get_func_full_args_one_argument(self):
        self.assertEqual(inspect.get_func_full_args(Person.one_argument), [('something',)])

    def test_get_func_full_args_all_arguments(self):
        # Defaulted params appear as (name, default) pairs; *args/**kwargs
        # keep their star prefixes.
        arguments = [('name',), ('address', 'home'), ('age', 25), ('*args',), ('**kwargs',)]
        self.assertEqual(inspect.get_func_full_args(Person.all_kinds), arguments)

    def test_func_accepts_var_args_has_var_args(self):
        self.assertEqual(inspect.func_accepts_var_args(Person.just_args), True)

    def test_func_accepts_var_args_no_var_args(self):
        self.assertEqual(inspect.func_accepts_var_args(Person.one_argument), False)
| bsd-3-clause |
bradh/six-library | processFiles.py | 4 | 3653 | import sys, os, re, subprocess
from os.path import isdir, join, split, exists
def cosmoSkyMed(filename):
    """Pair a COSMO-SkyMed MBI GeoTIFF with its sibling attributes XML.

    Returns (xml_path, image_path) when `filename` looks like a CSK MBI
    image and the matching '...attribs.xml' file exists next to it,
    otherwise None.
    """
    directory, basename = split(filename)
    if not re.match(r'CSK.*.MBI.tif', basename):
        return None
    stem = basename.rsplit('MBI.tif')[0]
    candidate = join(directory, stem + 'attribs.xml')
    return (candidate, filename) if exists(candidate) else None
def radarsat2(filename):
    """Pair a RADARSAT-2 imagery TIFF with the product.xml in its folder.

    Returns (xml_path, image_path) when `filename` is an 'imagery_*.tif'
    file and 'product.xml' exists in the same directory, otherwise None.
    """
    directory, basename = split(filename)
    if not re.match(r'imagery_.*.tif', basename):
        return None
    product_xml = join(directory, 'product.xml')
    return (product_xml, filename) if exists(product_xml) else None
def terraSAR(filename):
    """Pair a TerraSAR-X image with the product XML two levels up.

    Expects the layout <parent>/<product>/IMAGEDATA/IMAGE_*.tif with the
    metadata at <parent>/<product>/<product>.xml.  Returns
    (xml_path, image_path) when both conditions hold, otherwise None.
    """
    directory, basename = split(filename)
    container = split(directory)[1]
    if re.match(r'IMAGEDATA', container) and re.match(r'IMAGE_.*.tif', basename):
        parent, product = split(split(directory)[0])
        candidate = join(parent, product, '%s.xml' % product)
        if exists(candidate):
            return (candidate, filename)
    return None
if __name__ == '__main__':
    # NOTE: this is Python 2 code (print statements, optparse).
    from optparse import OptionParser

    parser = OptionParser(usage="usage: %prog [options] [path(s)]")
    parser.add_option("-d", "--dir", dest='outDir', metavar='DIR',
                      help="specify the output directory (default=cwd)")
    parser.add_option("-x", "--xml", dest='xml', action='store_true',
                      help="Write An XML output")
    parser.add_option("-n", "--nitf", dest='nitf', action='store_true',
                      help="Write A NITF output")
    parser.add_option("-t", "--tiff", dest='tiff', action='store_true',
                      help="Write A TIFF output")
    parser.add_option("-k", "--kml", dest='kml', action='store_true',
                      help="Write A KML output")
    parser.add_option("-l", "--level", dest='level', metavar='LEVEL', default=None,
                      help="Level at which to log <debug|info|warn|error>")
    parser.add_option("--limit", dest='limit', metavar='NUM', default=0, type='int',
                      help="Specify a limit on the number of images to process")
    (options, args) = parser.parse_args()

    # default to cwd if none provided
    outDir = options.outDir or os.getcwd()
    if not exists(outDir):
        os.makedirs(outDir)

    # Walk the given paths (directories are expanded breadth-first) and try
    # each sensor-specific matcher in turn; the first hit wins.
    inputs = []
    paths = args[:]
    while paths:
        path = paths.pop(0)
        if isdir(path):
            paths.extend(map(lambda x: join(path, x), os.listdir(path)))
        else:
            f = cosmoSkyMed(path)
            if not f:
                f = radarsat2(path)
            if not f:
                f = terraSAR(path)
            if f:
                inputs.append(f)

    # The converter binary is expected relative to this script's location.
    appName = os.path.abspath(join(split(__file__)[0], './install/bin/ScanSARConverter'))
    if 'win32' in sys.platform:
        appName += '.exe'

    # Build the common command-line prefix from the selected output formats.
    cmd = [appName]
    cmd.extend(['-d', outDir])
    if options.xml:
        cmd.append('-x')
    if options.nitf:
        cmd.append('-n')
    if options.tiff:
        cmd.append('-t')
    if options.kml:
        cmd.append('-k')
    if options.level:
        cmd.extend(['-l', options.level])

    def process(xml, image):
        # Run the converter synchronously on one (xml, image) pair.
        command = cmd[:]
        command.append('%s,%s' % (xml, image))
        print command
        p = subprocess.Popen(command).communicate()

    # --limit 0 (the default) means "process everything".
    limit = (options.limit > 0 and min(options.limit, len(inputs)) or len(inputs))
    print 'Processing %d files' % limit
    for i, (xml, image) in enumerate(inputs):
        if i >= limit:
            break
        process(xml, image)
| lgpl-3.0 |
pjg101/SickRage | lib/hachoir_metadata/setter.py | 94 | 5203 | from datetime import date, datetime
import re
from hachoir_core.language import Language
from locale import setlocale, LC_ALL
from time import strptime
from hachoir_metadata.timezone import createTimezone
from hachoir_metadata import config
# parseDatetime() first collapses every separator run to a single "~", so
# the patterns below match on "~" rather than on "-", "/", ".", ":" or " ".
NORMALIZE_REGEX = re.compile("[-/.: ]+")
YEAR_REGEX1 = re.compile("^([0-9]{4})$")

# Date regex: YYYY-MM-DD (US format)
DATE_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})$")

# Date regex: YYYY-MM-DD HH:MM:SS (US format)
DATETIME_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")

# Datetime regex: "MM-DD-YYYY HH:MM:SS" (FR format)
DATETIME_REGEX2 = re.compile("^([01]?[0-9])~([0-9]{2})~([0-9]{4})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")

# Timezone regex: "(...) +0200"
TIMEZONE_REGEX = re.compile("^(.*)~([+-][0-9]{2})00$")

# Timestmap: 'February 2007'
MONTH_YEAR = "%B~%Y"

# Timestmap: 'Sun Feb 24 15:51:09 2008'
RIFF_TIMESTAMP = "%a~%b~%d~%H~%M~%S~%Y"

# Timestmap: 'Thu, 19 Jul 2007 09:03:57'
ISO_TIMESTAMP = "%a,~%d~%b~%Y~%H~%M~%S"
def parseDatetime(value):
    """
    Parse a date/datetime string in one of several formats.  (Python 2
    code: note the `unicode` call in the year branch.)

    Year and date:

    >>> parseDatetime("2000")
    (datetime.date(2000, 1, 1), u'2000')
    >>> parseDatetime("2004-01-02")
    datetime.date(2004, 1, 2)

    Timestamp:

    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)
    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)

    Timestamp with timezone:

    >>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0000')
    datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<TimezoneUTC delta=0, name=u'UTC'>)
    >>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0200')
    datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<Timezone delta=2:00:00, name='+0200'>)
    """
    # Collapse all separator characters to "~" so the module-level regexes
    # handle "-", "/", ".", ":" and spaces uniformly.
    value = NORMALIZE_REGEX.sub("~", value.strip())

    # Bare year: note this branch alone returns a (date, text) pair rather
    # than a plain date/datetime.
    regs = YEAR_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            return (date(year, 1, 1), unicode(year))
        except ValueError:
            pass

    # YYYY-MM-DD
    regs = DATE_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            return date(year, month, day)
        except ValueError:
            pass

    # YYYY-MM-DD HH:MM:SS
    regs = DATETIME_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass

    # MM-DD-YYYY HH:MM:SS (note: group(1) is the month here)
    regs = DATETIME_REGEX2.match(value)
    if regs:
        try:
            month = int(regs.group(1))
            day = int(regs.group(2))
            year = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass

    # strptime()'s month/day names are locale dependent: parse under the
    # "C" locale and always restore the caller's locale afterwards.
    current_locale = setlocale(LC_ALL, "C")
    try:
        # Split off a trailing "+HH00"-style timezone, if any.
        match = TIMEZONE_REGEX.match(value)
        if match:
            without_timezone = match.group(1)
            delta = int(match.group(2))
            delta = createTimezone(delta)
        else:
            without_timezone = value
            delta = None
        try:
            timestamp = strptime(without_timezone, ISO_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass
        try:
            timestamp = strptime(without_timezone, RIFF_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass
        try:
            # 'February 2007' -> first day of that month.
            timestamp = strptime(value, MONTH_YEAR)
            arguments = list(timestamp[0:3])
            return date(*arguments)
        except ValueError:
            pass
    finally:
        setlocale(LC_ALL, current_locale)
    # Nothing matched.
    return None
def setDatetime(meta, key, value):
    # Strings are parsed via parseDatetime(); date/datetime objects pass
    # through unchanged; anything else is rejected with None.
    # (Python 2: `unicode` is a distinct type from `str`.)
    if isinstance(value, (str, unicode)):
        return parseDatetime(value)
    elif isinstance(value, (date, datetime)):
        return value
    return None
def setLanguage(meta, key, value):
    """
    Wrap a language code string in a Language object.

    >>> setLanguage(None, None, "fre")
    <Language 'French', code='fre'>
    >>> setLanguage(None, None, u"ger")
    <Language 'German', code='ger'>
    """
    # Language (hachoir_core) does the code-to-name resolution.
    return Language(value)
def setTrackTotal(meta, key, total):
    """
    Parse the track total as an integer; warn and return None on failure.

    >>> setTrackTotal(None, None, "10")
    10
    """
    try:
        parsed = int(total)
    except ValueError:
        # Not a number: record a warning on the metadata object instead of
        # failing the whole extraction.
        meta.warning("Invalid track total: %r" % total)
        return None
    return parsed
def setTrackNumber(meta, key, number):
    # Already numeric: nothing to parse (Python 2: `long` exists).
    if isinstance(number, (int, long)):
        return number
    # "track/total" notation: split it and store the total separately on
    # the metadata object (track_total has its own setter above).
    if "/" in number:
        number, total = number.split("/", 1)
        meta.track_total = total
    try:
        return int(number)
    except ValueError:
        meta.warning("Invalid track number: %r" % number)
        return None
def normalizeString(text):
    """Strip surrounding whitespace and NUL padding from *text*, unless raw
    output mode (config.RAW_OUTPUT) is enabled, in which case the text is
    returned untouched."""
    if not config.RAW_OUTPUT:
        text = text.strip(" \t\v\n\r\0")
    return text
| gpl-3.0 |
hradec/gaffer | python/GafferSceneUI/DeleteOutputsUI.py | 13 | 2461 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferScene.DeleteOutputs,
"description",
"""
A node which removes outputs from the globals.
""",
plugs = {
"names" : [
"description",
"""
The names of outputs to be removed. Names should be
separated by spaces and can use Gaffer's standard wildcards.
""",
],
"invertNames" : [
"description",
"""
When on, matching names are kept, and non-matching names are removed.
""",
],
}
)
| bsd-3-clause |
TFenby/python-mode | pymode/libs2/rope/refactor/wildcards.py | 22 | 5833 | from rope.base import ast, evaluate, builtins, pyobjects
from rope.refactor import patchedast, occurrences
class Wildcard(object):
    """Interface for restructuring wildcards; the methods are stubs that
    concrete wildcards (e.g. DefaultWildcard below) implement."""

    def get_name(self):
        """Return the name of this wildcard"""

    def matches(self, suspect, arg):
        """Return `True` if `suspect` matches this wildcard"""
class Suspect(object):
    """A candidate occurrence: the AST `node` named `name` inside
    `pymodule`, to be checked against a wildcard."""

    def __init__(self, pymodule, node, name):
        self.pymodule = pymodule
        self.node = node
        self.name = name
class DefaultWildcard(object):
    """The default restructuring wildcard

    The argument passed to this wildcard is in the
    ``key1=value1,key2=value2,...`` format.  Possible keys are:

    * name - for checking the reference
    * type - for checking the type
    * object - for checking the object
    * instance - for checking types but similar to builtin isinstance
    * exact - matching only occurrences with the same name as the wildcard
    * unsure - matching unsure occurrences

    """

    def __init__(self, project):
        self.project = project

    def get_name(self):
        return 'default'

    def matches(self, suspect, arg=''):
        # `arg` may be a dict or a "k=v,k2=v2" string (see class docstring).
        args = parse_arg(arg)

        if not self._check_exact(args, suspect):
            return False
        if not self._check_object(args, suspect):
            return False
        return True

    def _check_object(self, args, suspect):
        kind = None
        expected = None
        unsure = args.get('unsure', False)
        # When several check keys are given, the last one in this order wins.
        for check in ['name', 'object', 'type', 'instance']:
            if check in args:
                kind = check
                expected = args[check]
        if expected is not None:
            checker = _CheckObject(self.project, expected,
                                   kind, unsure=unsure)
            return checker(suspect.pymodule, suspect.node)
        # No object-level constraint given: accept.
        return True

    def _check_exact(self, args, suspect):
        node = suspect.node
        if args.get('exact'):
            # Exact mode: only bare names identical to the wildcard's name.
            if not isinstance(node, ast.Name) or not node.id == suspect.name:
                return False
        else:
            # Otherwise any expression node may match.
            if not isinstance(node, ast.expr):
                return False
        return True
def parse_arg(arg):
    """Parse a wildcard argument into a dict.

    Dicts pass through unchanged.  Strings are split on commas; each
    "key=value" token maps key to the (stripped) value, and each bare
    token maps to True.
    """
    if isinstance(arg, dict):
        return arg
    parsed = {}
    for token in arg.split(','):
        if '=' in token:
            key, value = token.split('=', 1)
            parsed[key.strip()] = value.strip()
        else:
            parsed[token.strip()] = True
    return parsed
class _CheckObject(object):
    """Callable that evaluates an AST node and compares the result against
    an `expected` dotted path, according to `kind` ('name', 'object',
    'type' or 'instance')."""

    def __init__(self, project, expected, kind='object', unsure=False):
        self.project = project
        self.kind = kind
        self.unsure = unsure
        # Resolve the expected dotted path to a pyname up front.
        self.expected = self._evaluate(expected)

    def __call__(self, pymodule, node):
        pyname = self._evaluate_node(pymodule, node)
        if pyname is None or self.expected is None:
            # Could not resolve one side: the `unsure` flag decides.
            return self.unsure
        if self._unsure_pyname(pyname, unbound=self.kind == 'name'):
            return True
        if self.kind == 'name':
            return self._same_pyname(self.expected, pyname)
        else:
            pyobject = pyname.get_object()
            if self.kind == 'object':
                objects = [pyobject]
            if self.kind == 'type':
                objects = [pyobject.get_type()]
            if self.kind == 'instance':
                # isinstance-like: also accept any superclass of the object
                # or of its type.
                objects = [pyobject]
                objects.extend(self._get_super_classes(pyobject))
                objects.extend(self._get_super_classes(pyobject.get_type()))
            for pyobject in objects:
                if self._same_pyobject(self.expected.get_object(), pyobject):
                    return True
            return False

    def _get_super_classes(self, pyobject):
        # Recursively collect all (transitive) superclasses.
        result = []
        if isinstance(pyobject, pyobjects.AbstractClass):
            for superclass in pyobject.get_superclasses():
                result.append(superclass)
                result.extend(self._get_super_classes(superclass))
        return result

    def _same_pyobject(self, expected, pyobject):
        return expected == pyobject

    def _same_pyname(self, expected, pyname):
        return occurrences.same_pyname(expected, pyname)

    def _unsure_pyname(self, pyname, unbound=True):
        return self.unsure and occurrences.unsure_pyname(pyname, unbound)

    def _split_name(self, name):
        # NOTE(review): appears unused within this class -- confirm against
        # callers before removing.
        parts = name.split('.')
        expression, kind = parts[0], parts[-1]
        if len(parts) == 1:
            kind = 'name'
        return expression, kind

    def _evaluate_node(self, pymodule, node):
        scope = pymodule.get_scope().get_inner_scope_for_line(node.lineno)
        expression = node
        if isinstance(expression, ast.Name) and \
           isinstance(expression.ctx, ast.Store):
            # Assignment targets can't be evaluated directly; re-evaluate
            # their source text instead.
            start, end = patchedast.node_region(expression)
            text = pymodule.source_code[start:end]
            return evaluate.eval_str(scope, text)
        else:
            return evaluate.eval_node(scope, expression)

    def _evaluate(self, code):
        # Walk a dotted path ("module.attr.attr...") down to a pyname.
        attributes = code.split('.')
        pyname = None
        if attributes[0] in ('__builtin__', '__builtins__'):

            # Minimal mapping-like stand-in for the builtins "module".
            class _BuiltinsStub(object):
                def get_attribute(self, name):
                    return builtins.builtins[name]

                def __getitem__(self, name):
                    return builtins.builtins[name]

                def __contains__(self, name):
                    return name in builtins.builtins

            pyobject = _BuiltinsStub()
        else:
            pyobject = self.project.get_module(attributes[0])
        for attribute in attributes[1:]:
            pyname = pyobject[attribute]
            if pyname is None:
                return None
            pyobject = pyname.get_object()
        return pyname
| lgpl-3.0 |
JoeriHermans/dist-keras | examples/kafka_producer.py | 3 | 1754 | """
This example will be used as a Kafka producer to generate dummy
data for our Spark Streaming example.
"""
## BEGIN Imports. ##############################################################
from kafka import *
import sys
import pandas
import time
import json
## END Imports. ################################################################
def usage():
    """Print command-line usage information and terminate the process."""
    messages = (
        "Distributed Keras Example: Kafka Producer",
        "",
        "Usage:",
        "python kafka_producer.py [bootstrap_server]",
    )
    for message in messages:
        print(message)
    exit(0)
def allocate_producer(bootstrap_server):
    # Connect a Kafka producer to the given bootstrap broker ("host:port").
    producer = KafkaProducer(bootstrap_servers=[bootstrap_server])
    return producer
def read_data():
    """Load the ATLAS Higgs CSV and return it as a collection of row dicts."""
    # Path is relative to the current working directory.
    path = 'data/atlas_higgs.csv'
    data = []
    # Use Pandas to infer the types.
    data = pandas.read_csv(path)
    # Remove the unneeded columns.
    del data['Label']
    del data['Weight']
    # Convert the data to a list of dictionaries.
    # NOTE(review): .values() is a list on Python 2 but a view on Python 3;
    # confirm the intended interpreter if iterating more than once.
    data = data.transpose().to_dict().values()
    return data
def produce(producer, topic, data):
    """Publish every record in *data* to *topic*, serialized as JSON."""
    for record in data:
        producer.send(topic, json.dumps(record))
def main():
    """Entry point: stream the dataset to Kafka forever (stop with Ctrl-C)."""
    # Check if the required number of arguments has been specified.
    if len(sys.argv) != 2:
        usage()
    # Fetch the bootstrap server from the arguments.
    bootstrap_server = sys.argv[1]
    # Allocate the producer.
    producer = allocate_producer(bootstrap_server)
    # Read the data from the CSV file.
    data = read_data()
    iteration = 1
    # Transmit the data in a continuous loop, waiting 5 seconds after
    # every iteration.
    while True:
        print("Iteration " + str(iteration) + ".")
        produce(producer, 'Machine_Learning', data)
        iteration += 1
        time.sleep(5)


if __name__ == "__main__":
    main()
| gpl-3.0 |
yuval-harpaz/MNE4D | pyScripts/b391g2.py | 1 | 3027 | import mne
import surfer
import numpy as np
from compute_g2 import g2
#cd ~/Data/epilepsy/b391/3
# --- Empty-room noise covariance (20-70 Hz band) ---------------------------
rawEmpty = mne.io.bti.read_raw_bti('/home/yuval/Data/emptyRoom/lf_c,rfhp0.1Hz', rename_channels=False, sort_by_ch_name=False)
rawEmpty.pick_types('mag')
rawEmpty.filter(20, 70)
events = mne.read_events('/home/yuval/Data/emptyRoom/1s-eve.fif')
# event_id = dict(quiet=1,rythm=2,hf=3)
event_id = dict(empty=1)
# BUG FIX: the epochs must come from the empty-room recording loaded above.
# The original passed `raw`, which is not defined until later in the script,
# so this line raised a NameError (and named the wrong recording anyway).
epochsEmpty = mne.Epochs(rawEmpty, events, event_id,
                         tmin=0, tmax=0.25, baseline=(None, None), preload=True)
empty_cov = mne.compute_covariance(epochsEmpty, method='empirical')
mne.write_cov('/home/yuval/Data/emptyRoom/epi20_70-cov.fif', empty_cov)
# --- Subject data, forward model and inverse operator -----------------------
subject = 'b391'
freesurfer_home = "/usr/local/freesurfer"
subjects_dir = freesurfer_home + "/subjects"
# Subject recording, band-passed to the same 20-70 Hz band as the noise cov.
raw = mne.io.Raw('0-raw.fif', preload=True)
raw.pick_types('mag')
raw.filter(20, 70)
src = mne.setup_source_space(
    subject, subjects_dir=subjects_dir, add_dist=False, fname=None)
bem = subjects_dir + '/' + subject + '/bem/' + subject + '-bem.fif'
trans = 'b391-trans.fif'
# The forward solution was computed once with the call below and is now
# loaded from disk instead of being recomputed.
# fwd = mne.make_forward_solution(
#    raw.info, trans, src, bem, fname='3-fwd.fif', meg=True, eeg=False, mindist=5.0, n_jobs=4, overwrite=True)
fwd = mne.read_forward_solution('3-fwd.fif')
fwd = mne.convert_forward_solution(fwd, surf_ori=True)
# Fixed-orientation inverse operator regularized by the empty-room covariance.
inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd, empty_cov, fixed=True)
mne.minimum_norm.write_inverse_operator('3-inv.fif', inv)
# --- Accumulate excess kurtosis (g2) over fixed-length segments --------------
# Segment length in samples; 2034.5 presumably relates to the sampling rate
# -- TODO confirm.
dur = np.int(round(2034.5/4)+1)
snr = 3.0
lambda2 = 1.0 / snr ** 2
samps = np.arange(0, raw._data.shape[1], dur)
# Drop the last (possibly incomplete) segment start.
samps = samps[0:-1]
Kur = np.zeros((inv['nsource']))
# NOTE(review): range starts at 1, so the segment beginning at sample 0 is
# never processed -- confirm this is intentional.
for sampi in range(1, len(samps)):
    start = samps[sampi]
    stop = start + dur
    stc = mne.minimum_norm.apply_inverse_raw(raw, inv, lambda2, 'dSPM', None,
                                             start, stop, pick_ori=None)
    # Per-source excess kurtosis of the segment; negative values clipped to 0.
    kur = g2(stc.data)
    kur[kur < 0] = 0
    Kur = Kur + kur
    print("DONE "+str(sampi+1)+" of "+str(len(samps)))
# After the loop, sampi == len(samps)-1 == number of processed segments,
# so this is the mean over segments.
Kur = Kur/sampi
# --- Visualize the averaged kurtosis on both hemispheres --------------------
# Vertex lists from the last computed stc; Kur is ordered LH then RH.
verticesL = stc.lh_vertno
verticesR = stc.rh_vertno
stcL = Kur[0:len(verticesL)]
stcR = Kur[len(verticesL):len(Kur)]
colormap = 'hot'
# export SUBJECTS_DIR=/usr/local/freesurfer/subjects/
# White-matter surface rendering.
brain = surfer.Brain(subject, 'both', 'white',
                     views='lateral', subjects_dir=subjects_dir)
brain.add_data(stcL, colormap=colormap,
               vertices=verticesL, smoothing_steps=10, hemi='lh')
brain.add_data(stcR, colormap=colormap,
               vertices=verticesR, smoothing_steps=10, hemi='rh')
maxval = max(stcL.max(), stcR.max())
brain.scale_data_colormap(
    fmin=maxval/4, fmid=maxval / 2, fmax=maxval, transparent=True)
# Same data on the pial surface.
brain = surfer.Brain(subject, 'both', 'pial',
                     views='lateral', subjects_dir=subjects_dir)
brain.add_data(stcL, colormap=colormap,
               vertices=verticesL, smoothing_steps=10, hemi='lh')
brain.add_data(stcR, colormap=colormap,
               vertices=verticesR, smoothing_steps=10, hemi='rh')
maxval = max(stcL.max(), stcR.max())
brain.scale_data_colormap(
    fmin=maxval/4, fmid=maxval / 2, fmax=maxval, transparent=True)
| gpl-2.0 |
saeki-masaki/glance | glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py | 12 | 10367 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table)
from glance.db.sqlalchemy.migrate_repo.schema import (
BigInteger, Boolean, DateTime, Integer, Numeric, String, Text,
create_tables) # noqa
def define_artifacts_table(meta):
    """Build the primary ``artifacts`` table definition.

    The Index(...) calls attach themselves to the table as a side effect
    of construction; only the Table object is returned.
    """
    artifacts = Table('artifacts',
                      meta,
                      Column('id', String(36), primary_key=True,
                             nullable=False),
                      Column('name', String(255), nullable=False),
                      Column('type_name', String(255), nullable=False),
                      Column('type_version_prefix', BigInteger(),
                             nullable=False),
                      Column('type_version_suffix', String(255)),
                      Column('type_version_meta', String(255)),
                      Column('version_prefix', BigInteger(), nullable=False),
                      Column('version_suffix', String(255)),
                      Column('version_meta', String(255)),
                      Column('description', Text()),
                      Column('visibility', String(32), nullable=False),
                      Column('state', String(32), nullable=False),
                      Column('owner', String(255), nullable=False),
                      Column('created_at', DateTime(), nullable=False),
                      Column('updated_at', DateTime(),
                             nullable=False),
                      Column('deleted_at', DateTime()),
                      Column('published_at', DateTime()),
                      mysql_engine='InnoDB',
                      mysql_charset='utf8',
                      extend_existing=True)

    Index('ix_artifact_name_and_version', artifacts.c.name,
          artifacts.c.version_prefix, artifacts.c.version_suffix)
    Index('ix_artifact_type', artifacts.c.type_name,
          artifacts.c.type_version_prefix, artifacts.c.type_version_suffix)
    Index('ix_artifact_state', artifacts.c.state)
    Index('ix_artifact_owner', artifacts.c.owner)
    Index('ix_artifact_visibility', artifacts.c.visibility)

    return artifacts
def define_artifact_tags_table(meta):
    """Build the ``artifact_tags`` table (tag strings per artifact)."""
    artifact_tags = Table('artifact_tags',
                          meta,
                          Column('id', String(36), primary_key=True,
                                 nullable=False),
                          Column('artifact_id', String(36),
                                 ForeignKey('artifacts.id'), nullable=False),
                          Column('value', String(255), nullable=False),
                          Column('created_at', DateTime(), nullable=False),
                          Column('updated_at', DateTime(),
                                 nullable=False),
                          mysql_engine='InnoDB',
                          mysql_charset='utf8',
                          extend_existing=True)

    Index('ix_artifact_tags_artifact_id', artifact_tags.c.artifact_id)
    Index('ix_artifact_tags_artifact_id_tag_value',
          artifact_tags.c.artifact_id, artifact_tags.c.value)

    return artifact_tags
def define_artifact_dependencies_table(meta):
    """Build the ``artifact_dependencies`` link table (source -> dest
    edges, with the originating artifact and direct/transitive flag)."""
    artifact_dependencies = Table('artifact_dependencies',
                                  meta,
                                  Column('id', String(36), primary_key=True,
                                         nullable=False),
                                  Column('artifact_source', String(36),
                                         ForeignKey('artifacts.id'),
                                         nullable=False),
                                  Column('artifact_dest', String(36),
                                         ForeignKey('artifacts.id'),
                                         nullable=False),
                                  Column('artifact_origin', String(36),
                                         ForeignKey('artifacts.id'),
                                         nullable=False),
                                  Column('is_direct', Boolean(),
                                         nullable=False),
                                  Column('position', Integer()),
                                  Column('name', String(36)),
                                  Column('created_at', DateTime(),
                                         nullable=False),
                                  Column('updated_at', DateTime(),
                                         nullable=False),
                                  mysql_engine='InnoDB',
                                  mysql_charset='utf8',
                                  extend_existing=True)

    Index('ix_artifact_dependencies_source_id',
          artifact_dependencies.c.artifact_source)
    # FIX: dropped the stray trailing comma that wrapped this Index call in
    # a throwaway one-element tuple, inconsistent with the sibling calls.
    Index('ix_artifact_dependencies_dest_id',
          artifact_dependencies.c.artifact_dest)
    Index('ix_artifact_dependencies_origin_id',
          artifact_dependencies.c.artifact_origin)
    Index('ix_artifact_dependencies_direct_dependencies',
          artifact_dependencies.c.artifact_source,
          artifact_dependencies.c.is_direct)

    return artifact_dependencies
def define_artifact_blobs_table(meta):
    """Build the ``artifact_blobs`` table (binary payload metadata)."""
    artifact_blobs = Table('artifact_blobs',
                           meta,
                           Column('id', String(36), primary_key=True,
                                  nullable=False),
                           Column('artifact_id', String(36),
                                  ForeignKey('artifacts.id'),
                                  nullable=False),
                           Column('size', BigInteger(), nullable=False),
                           Column('checksum', String(32)),
                           Column('name', String(255), nullable=False),
                           # NOTE(review): 329 is an unusual length --
                           # confirm it is intentional before changing.
                           Column('item_key', String(329)),
                           Column('position', Integer()),
                           Column('created_at', DateTime(), nullable=False),
                           Column('updated_at', DateTime(),
                                  nullable=False),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8',
                           extend_existing=True)
    Index('ix_artifact_blobs_artifact_id',
          artifact_blobs.c.artifact_id)
    Index('ix_artifact_blobs_name',
          artifact_blobs.c.name)
    return artifact_blobs
def define_artifact_properties_table(meta):
    """Build the ``artifact_properties`` table.

    Property values are typed: exactly one of the *_value columns is
    expected to be populated per row.
    """
    artifact_properties = Table('artifact_properties',
                                meta,
                                Column('id', String(36),
                                       primary_key=True,
                                       nullable=False),
                                Column('artifact_id', String(36),
                                       ForeignKey('artifacts.id'),
                                       nullable=False),
                                Column('name', String(255),
                                       nullable=False),
                                Column('string_value', String(255)),
                                Column('int_value', Integer()),
                                Column('numeric_value', Numeric()),
                                Column('bool_value', Boolean()),
                                Column('text_value', Text()),
                                Column('created_at', DateTime(),
                                       nullable=False),
                                Column('updated_at', DateTime(),
                                       nullable=False),
                                Column('position', Integer()),
                                mysql_engine='InnoDB',
                                mysql_charset='utf8',
                                extend_existing=True)

    Index('ix_artifact_properties_artifact_id',
          artifact_properties.c.artifact_id)
    Index('ix_artifact_properties_name', artifact_properties.c.name)

    return artifact_properties
def define_artifact_blob_locations_table(meta):
    """Build the ``artifact_blob_locations`` table (storage URIs per blob)."""
    artifact_blob_locations = Table('artifact_blob_locations',
                                    meta,
                                    Column('id', String(36),
                                           primary_key=True,
                                           nullable=False),
                                    Column('blob_id', String(36),
                                           ForeignKey('artifact_blobs.id'),
                                           nullable=False),
                                    Column('value', Text(), nullable=False),
                                    Column('created_at', DateTime(),
                                           nullable=False),
                                    Column('updated_at', DateTime(),
                                           nullable=False),
                                    Column('position', Integer()),
                                    Column('status', String(36),
                                           nullable=True),
                                    mysql_engine='InnoDB',
                                    mysql_charset='utf8',
                                    extend_existing=True)

    Index('ix_artifact_blob_locations_blob_id',
          artifact_blob_locations.c.blob_id)

    return artifact_blob_locations
def upgrade(migrate_engine):
    """Apply migration 041: create all artifact-related tables."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Order matters: the dependent tables reference `artifacts` (and
    # `artifact_blobs`) through foreign keys.
    tables = [define_artifacts_table(meta),
              define_artifact_tags_table(meta),
              define_artifact_properties_table(meta),
              define_artifact_blobs_table(meta),
              define_artifact_blob_locations_table(meta),
              define_artifact_dependencies_table(meta)]
    create_tables(tables)
| apache-2.0 |
yayoiukai/signalserver | reports/views.py | 1 | 6002 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from .models import Summary, Entry, Report, Item
from fileuploads.models import Video
from groups.models import Process, Result, Row
from signals.models import Process as File_Process, Output, Signal
from policies.models import Policy, Operation
from django.contrib.auth.decorators import login_required
@login_required(login_url="/login/")
def dashboard(request):
    # Render the static reports dashboard; authentication is enforced by
    # the decorator (anonymous users are redirected to /login/).
    return render(request, 'reports/dashboard.html')
def create_entry(process, summary):
    """Create Entry rows for *summary*, flagging per-frame result numbers
    that deviate from their operation's average by more than the
    operation's configured ``percentage`` threshold.
    """
    # Maps op_id -> list of (filename, result_number) pairs, aggregated
    # across every Result of this process.
    values_dict = {}
    results = Result.objects.filter(process=process)
    entries = []  # NOTE(review): never used below
    for result in results:
        rows = Row.objects.filter(result=result)
        video = Video.objects.get(filename=result.filename)
        # Backfill the video's frame count from the first row when missing.
        if video.frame_count == 0 and rows.count() > 0:
            video.frame_count = rows[0].frame_number
            video.save()
        for row in rows:
            if row.op_id in values_dict:
                ls = values_dict[row.op_id]
                ls.append((result.filename, row.result_number))
            else:
                values_dict[row.op_id] = [(result.filename, row.result_number)]
    for op_id, ls in values_dict.items():
        op = Operation.objects.get(id=op_id)
        percentage = op.percentage
        nums = []
        for item in ls:
            nums.append(item[1])
        # NOTE(review): under Python 2 this is integer (floor) division if
        # result_number is an int -- verify the field type.
        average = sum(nums)/len(nums)
        for item in ls:
            # A threshold of 0.0 disables flagging for this operation.
            if percentage == 0.0:
                continue
            # Guard against division by zero.  This rebinds `average` for
            # all remaining items too -- presumably intentional; verify.
            if average == 0:
                average = 1
            if ((item[1] - average)/average) * 100 > percentage:
                new_entry = Entry(
                    summary=summary,
                    file_name=item[0],
                    operation_id=op_id,
                    operation_name=op.op_name,
                    signal_name=op.signal_name,
                    second_signal_name=op.second_signal_name,
                    percentage=percentage,
                    result_number=item[1],
                    average=average,
                    cut_off=op.cut_off_number
                )
                new_entry.save()
def get_off_values(high, low, values, times):
    """Collect the signal values lying outside the open band (low, high).

    A value is kept when it is >= *high* or <= *low*; its frame time (the
    entry of *times* at the same index) is kept alongside it.  Both result
    lists are then sorted independently of each other, so positions no
    longer correspond pairwise.  Returns ``(off_values, off_times)``.
    """
    off_values = []
    off_times = []
    for idx, value in enumerate(values):
        if value >= high or value <= low:
            off_values.append(value)
            off_times.append(times[idx])
    off_values.sort()
    off_times.sort()
    return (off_values, off_times)
def create_item(process, report):
    """Create Item rows for *report*: for every signal output of *process*,
    record the values/times falling outside percentage bands around the
    signal's average (one Item per configured percentage).
    """
    values_dict = {}  # NOTE(review): never used in this function
    outputs = Output.objects.filter(process=process)
    for output in outputs:
        items, values, times, percentages = [], [], [], []
        # output has only one signal type but signal could have multiple entries
        signals = Signal.objects.filter(output=output)
        for signal in signals:
            values += signal.signal_values
            times += signal.frame_times
        if len(values) == 0:
            continue
        average = sum(values)/len(values)
        # find percentages it would need to generate items
        # it checks against average
        policy = Policy.objects.get(id=process.policy_id)
        temp_operations = Operation.objects.filter(policy=policy)
        operations = temp_operations.filter(signal_name=output.signal_name)
        for op in operations:
            if op.file_percentage > 0:
                percentages.append(op.file_percentage)
        percentages.sort()
        # All matching operations share the signal metadata; use the first.
        op_id = operations[0].pk
        op = Operation.objects.get(id=op_id)
        for per in percentages:
            # NOTE(review): under Python 2, per/100 truncates to 0 if `per`
            # is an int -- verify the field type of file_percentage.
            if per > 100:
                # Above 100% the high bound is average*(per/100) rather than
                # average + average*(per/100) -- presumably intentional.
                high = average * (per/100)
                low = average - average*(per/100)
            else:
                high = average + average*(per/100)
                low = average - average*(per/100)
            results = get_off_values(high, low, values, times)
            key = str(per) + output.signal_name  # NOTE(review): unused
            new_item = Item(
                report=report,
                file_name=process.file_name,
                signal_name=op.get_signal_name_display,
                op_id=op.pk,
                total_frame_number=process.frame_count,
                off_total_frame_number=len(results[0]),
                percentage=per,
                off_signal_values=results[0],
                off_frame_times=results[1],
                average=average
            )
            new_item.save()
        # NOTE(review): appears unreachable -- the `continue` above already
        # skips empty `values`, so it is always non-empty here.
        if len(values) == 0:
            break
def create_summary(process):
    """Return the existing Summary for *process*, or create one (together
    with its Entry rows via ``create_entry``) if none exists yet.
    """
    summary = Summary.objects.filter(process_id=process.id)
    if summary.count() > 0:
        return summary[0]
    else:
        new_summary = Summary(
            user_name=process.user_name,
            process_id=process.id,
            policy_name=process.policy_name,
            policy_id=process.policy_id,
            group_id=process.group_id,
            group_name=process.group_name,
        )
        new_summary.save()
        # Reload so the generated primary key is populated before use.
        new_summary.refresh_from_db()
        create_entry(process, new_summary)
        return new_summary
def create_report(process):
    """Return the existing Report for *process*, or create one (together
    with its Item rows via ``create_item``).

    Also backfills the related Video's frame count from the process when
    it is still unset.
    """
    report = Report.objects.filter(process_id=process.id)
    video = Video.objects.get(filename=process.file_name)
    if video.frame_count == 0:
        video.frame_count = process.frame_count
        video.save()
    if report.count() > 0:
        return report[0]
    else:
        new_report = Report(
            user_name=process.user_name,
            process_id=process.id,
            policy_name=process.policy_name,
            policy_id=process.policy_id,
            file_id=process.file_id,
            file_name=process.file_name,
        )
        new_report.save()
        create_item(process, new_report)
        return new_report
| mit |
dcprojects/CoolProp | Web/_ext/edit_on_github.py | 3 | 1396 | """
Sphinx extension to add ReadTheDocs-style "Edit on GitHub" links to the
sidebar.
Loosely based on https://github.com/astropy/astropy/pull/347
Edited by Ian Bell, 2014 to add path_prefix
"""
import os
import warnings
__licence__ = 'BSD (3 clause)'
def get_github_url(app, view, path):
    """Build a GitHub URL for *path* on the configured project/branch.

    *view* is the GitHub view name, e.g. 'blob' (show) or 'edit'.
    """
    config = app.config
    template = 'https://github.com/{project}/{view}/{branch}/{path}'
    return template.format(project=config.edit_on_github_project,
                           view=view,
                           branch=config.edit_on_github_branch,
                           path=path)
def html_page_context(app, pagename, templatename, context, doctree):
    """Inject 'show on GitHub' / 'edit on GitHub' URLs into the page context.

    Only applies to regular pages rendered with page.html; warns and bails
    out when edit_on_github_project is not configured.
    """
    if templatename != 'page.html':
        return
    if not app.config.edit_on_github_project:
        warnings.warn("edit_on_github_project not specified")
        return
    # Source path relative to the doc root, with forward slashes for URLs.
    rel_path = os.path.relpath(doctree.get('source'),
                               app.builder.srcdir).replace('\\', '/')
    full_path = app.config.edit_on_github_path_prefix + '/' + rel_path
    context['show_on_github_url'] = get_github_url(app, 'blob', full_path)
    context['edit_on_github_url'] = get_github_url(app, 'edit', full_path)
def setup(app):
    """Sphinx extension entry point: register config values and the handler."""
    for name, default in (('edit_on_github_project', ''),
                          ('edit_on_github_branch', 'master'),
                          ('edit_on_github_path_prefix', '')):
        app.add_config_value(name, default, True)
    app.connect('html-page-context', html_page_context)
| mit |
7kbird/chrome | third_party/tlslite/tlslite/utils/cipherfactory.py | 151 | 2934 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Factory functions for symmetric cryptography."""
import os
from tlslite.utils import python_aes
from tlslite.utils import python_rc4
from tlslite.utils import cryptomath
tripleDESPresent = False
if cryptomath.m2cryptoLoaded:
from tlslite.utils import openssl_aes
from tlslite.utils import openssl_rc4
from tlslite.utils import openssl_tripledes
tripleDESPresent = True
if cryptomath.pycryptoLoaded:
from tlslite.utils import pycrypto_aes
from tlslite.utils import pycrypto_rc4
from tlslite.utils import pycrypto_tripledes
tripleDESPresent = True
# **************************************************************************
# Factory Functions for AES
# **************************************************************************
def createAES(key, IV, implList=None):
    """Create a new AES object.

    Tries each implementation in implList order ("openssl", "pycrypto",
    "python" by default), skipping backends whose library is not loaded.

    @type key: str
    @param key: A 16, 24, or 32 byte string.

    @type IV: str
    @param IV: A 16 byte string

    @rtype: L{tlslite.utils.AES}
    @return: An AES object.

    @raise NotImplementedError: if no requested implementation is available.
    """
    # PEP 8: compare against None with `is`, not `==`.
    if implList is None:
        implList = ["openssl", "pycrypto", "python"]

    for impl in implList:
        if impl == "openssl" and cryptomath.m2cryptoLoaded:
            return openssl_aes.new(key, 2, IV)
        elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_aes.new(key, 2, IV)
        elif impl == "python":
            return python_aes.new(key, 2, IV)
    raise NotImplementedError()
def createRC4(key, IV, implList=None):
    """Create a new RC4 object.

    Tries each implementation in implList order ("openssl", "pycrypto",
    "python" by default), skipping backends whose library is not loaded.

    @type key: str
    @param key: A 16 to 32 byte string.

    @type IV: object
    @param IV: Must be empty -- RC4 takes no IV; a non-empty value raises
        AssertionError.

    @rtype: L{tlslite.utils.RC4}
    @return: An RC4 object.

    @raise NotImplementedError: if no requested implementation is available.
    """
    # PEP 8: compare against None with `is`, not `==`.
    if implList is None:
        implList = ["openssl", "pycrypto", "python"]

    if len(IV) != 0:
        raise AssertionError()
    for impl in implList:
        if impl == "openssl" and cryptomath.m2cryptoLoaded:
            return openssl_rc4.new(key)
        elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_rc4.new(key)
        elif impl == "python":
            return python_rc4.new(key)
    raise NotImplementedError()
def createTripleDES(key, IV, implList=None):
    """Create a new 3DES object.

    Tries each implementation in implList order ("openssl", "pycrypto" by
    default -- there is no pure-python 3DES backend), skipping backends
    whose library is not loaded.

    @type key: str
    @param key: A 24 byte string.

    @type IV: str
    @param IV: An 8 byte string

    @rtype: L{tlslite.utils.TripleDES}
    @return: A 3DES object.

    @raise NotImplementedError: if no requested implementation is available.
    """
    # PEP 8: compare against None with `is`, not `==`.
    if implList is None:
        implList = ["openssl", "pycrypto"]

    for impl in implList:
        if impl == "openssl" and cryptomath.m2cryptoLoaded:
            return openssl_tripledes.new(key, 2, IV)
        elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_tripledes.new(key, 2, IV)
    raise NotImplementedError()
jdunaravich/thumbor | tests/test_utils.py | 4 | 3786 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from unittest import TestCase
from mock import Mock, patch
from preggy import expect
from thumbor.utils import (
CONTENT_TYPE, EXTENSION,
logger,
on_exception,
deprecated,
which,
)
class UtilsTestCase(TestCase):
    """Unit tests for the helpers exposed by ``thumbor.utils``."""

    def setUp(self, *args, **kw):
        # Flag flipped by the exception-handling callbacks below.
        self.handled = False
        super(UtilsTestCase, self).setUp(*args, **kw)

    def test_can_get_content_type(self):
        expect(CONTENT_TYPE.get('.jpg')).to_equal('image/jpeg')
        expect(CONTENT_TYPE.get('.jpeg')).to_equal('image/jpeg')
        expect(CONTENT_TYPE.get('.gif')).to_equal('image/gif')
        expect(CONTENT_TYPE.get('.png')).to_equal('image/png')
        expect(CONTENT_TYPE.get('.webp')).to_equal('image/webp')
        expect(CONTENT_TYPE.get('.mp4')).to_equal('video/mp4')
        expect(CONTENT_TYPE.get('.webm')).to_equal('video/webm')

    def test_can_get_extension(self):
        expect(EXTENSION.get('image/jpeg')).to_equal('.jpg')
        expect(EXTENSION.get('image/gif')).to_equal('.gif')
        expect(EXTENSION.get('image/png')).to_equal('.png')
        expect(EXTENSION.get('image/webp')).to_equal('.webp')
        expect(EXTENSION.get('video/mp4')).to_equal('.mp4')
        expect(EXTENSION.get('video/webm')).to_equal('.webm')

    def test_can_get_logger(self):
        expect(logger.name).to_equal('thumbor')

    def test_can_create_on_exception(self):
        callback_mock = Mock()
        inst = on_exception(callback_mock)
        expect(inst.callback).to_equal(callback_mock)
        # Defaults to catching Exception unless a class is given.
        expect(inst.exception_class).to_equal(Exception)
        inst = on_exception(callback_mock, RuntimeError)
        expect(inst.exception_class).to_equal(RuntimeError)

    def test_can_handle_exceptions(self):
        self.handled = False

        def handle_callback(func, exc, exc_value):
            expect(func).to_equal('test_func')
            expect(exc).to_equal(Exception)
            expect(str(exc_value)).to_equal("Test")
            self.handled = True

        @on_exception(handle_callback)
        def test_func():
            raise RuntimeError("Test")

        test_func()
        expect(self.handled).to_be_true()

    def __can_handle_callback(self, func, exc, exc_value):
        # Bound-method variant of the callback; name is mangled to
        # _UtilsTestCase__can_handle_callback on purpose.
        expect(func).to_equal('test_func')
        expect(exc).to_equal(Exception)
        expect(str(exc_value)).to_equal("Test")
        self.handled = True

    def test_can_handle_exceptions_with_instance(self):
        self.handled = False

        @on_exception(UtilsTestCase.__can_handle_callback)
        def test_func(self):
            raise RuntimeError("Test")

        test_func(self)
        expect(self.handled).to_be_true()

    def test_cant_handle_exceptions_without_callback(self):
        @on_exception(None)
        def test_func(self):
            raise RuntimeError("Test")

        with expect.error_to_happen(RuntimeError):
            test_func(self)

    def test_deprecated_logs_msg(self):
        @deprecated('func2')
        def test_func():
            pass

        with patch.object(logger, 'warn') as mock_warn:
            test_func()
            mock_warn.assert_called_once_with('Deprecated function test_func: func2')

    def test_can_which_by_path(self):
        # NOTE(review): depends on /bin/ls existing on the host system.
        result = which('/bin/ls')
        expect(result).to_equal('/bin/ls')
        result = which('/tmp')
        expect(result).to_be_null()

    def test_can_which_by_env(self):
        result = which('ls')
        expect(result).to_equal('/bin/ls')
        result = which('invalid-command')
        expect(result).to_be_null()
| mit |
machawk1/pywb | pywb/rewrite/test/test_rewrite_live.py | 1 | 9395 | from pywb.rewrite.rewrite_live import LiveRewriter
from pywb.rewrite.url_rewriter import UrlRewriter
from pywb.rewrite.wburl import WbUrl
from pywb import get_test_dir
from io import BytesIO
# This module has some rewriting tests against the 'live web'
# As such, the content may change and the test may break
# Rewriter for the default (full rewrite) replay mode.
urlrewriter = UrlRewriter('20131226101010/http://example.com/some/path/index.html', '/pywb/')
# Rewriter for banner-only ("bn_") replay mode.
bn_urlrewriter = UrlRewriter('20131226101010bn_/http://example.com/some/path/index.html', '/pywb/')
def head_insert_func(rule, cdx):
    """Return the wombat <script> head-insert, or '' when the rule's
    js_rewrite_location is 'urls' (no JS location rewriting)."""
    wombat_tag = '<script src="/static/__pywb/wombat.js"> </script>'
    return '' if rule.js_rewrite_location == 'urls' else wombat_tag
def test_csrf_token_headers():
    """The outgoing X-CSRFToken header is rewritten to match the csrftoken cookie."""
    rewriter = LiveRewriter()
    env = {'HTTP_X_CSRFTOKEN': 'wrong', 'HTTP_COOKIE': 'csrftoken=foobar'}
    req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env)
    assert req_headers == {'X-CSRFToken': 'foobar', 'Cookie': 'csrftoken=foobar'}
def test_forwarded_scheme():
    """X-Forwarded-Proto is forced to 'http' for the upstream request."""
    rewriter = LiveRewriter()
    env = {'HTTP_X_FORWARDED_PROTO': 'https', 'Other': 'Value'}
    req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env)
    assert req_headers == {'X-Forwarded-Proto': 'http'}
def test_req_cookie_rewrite_1():
    """Rule-based cookie rewrite: extra cookie is appended to the existing one."""
    rewriter = LiveRewriter()
    env = {'HTTP_COOKIE': 'A=B'}
    urlkey = 'example,example,test)/'
    url = 'test.example.example/'
    req_headers = rewriter.translate_headers(url, urlkey, env)
    assert req_headers == {'Cookie': 'A=B; FOO=&bar=1'}
def test_req_cookie_rewrite_2():
    """Rule-based cookie rewrite: an existing FOO cookie is replaced."""
    rewriter = LiveRewriter()
    env = {'HTTP_COOKIE': 'FOO=goo'}
    urlkey = 'example,example,test)/'
    url = 'test.example.example/'
    req_headers = rewriter.translate_headers(url, urlkey, env)
    assert req_headers == {'Cookie': 'FOO=&bar=1'}
def test_req_cookie_rewrite_3():
    """Rule-based cookie rewrite: cookie is added even with no incoming cookie."""
    rewriter = LiveRewriter()
    env = {}
    urlkey = 'example,example,test)/'
    url = 'test.example.example/'
    req_headers = rewriter.translate_headers(url, urlkey, env)
    assert req_headers == {'Cookie': '; FOO=&bar=1'}
def test_local_1():
    """Full rewrite of a local sample: wombat insert, JS location and links rewritten."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
                                         urlrewriter,
                                         head_insert_func,
                                         'com,example,test)/')
    # wombat insert added
    assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff
    # JS location and JS link rewritten
    assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff
    # link rewritten
    assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_no_head():
    """Rewrite of a page without <head>: wombat is still inserted."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html',
                                         urlrewriter,
                                         head_insert_func,
                                         'com,example,test)/')
    # wombat insert added
    assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
    # location rewritten
    assert 'window.WB_wombat_location = "/other.html"' in buff
    # link rewritten
    assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_no_head_banner_only():
    """Banner-only mode on a head-less page: wombat inserted but nothing rewritten."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html',
                                         bn_urlrewriter,
                                         head_insert_func,
                                         'com,example,test)/')
    # wombat insert added
    assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
    # location NOT rewritten
    assert 'window.location = "/other.html"' in buff
    # link NOT rewritten
    assert '"another.html"' in buff
def test_local_banner_only_no_rewrite():
    """Banner-only mode: wombat inserted, JS location and links left untouched."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
                                         bn_urlrewriter,
                                         head_insert_func,
                                         'com,example,test)/')
    # wombat insert added
    assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff
    # JS location NOT rewritten, JS link NOT rewritten
    assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff, buff
    # link NOT rewritten
    assert '"another.html"' in buff
def test_local_2_link_only_rewrite():
    """'nolocation_rewrite' rule: links rewritten, JS location untouched, no wombat."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
                                         urlrewriter,
                                         head_insert_func,
                                         'example,example,test)/nolocation_rewrite')
    # no wombat insert
    assert '<head><script src="/static/__pywb/wombat.js"> </script>' not in buff
    # JS location NOT rewritten, JS link rewritten
    assert 'window.location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff
    # still link rewrite
    assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_2_js_loc_only_rewrite():
    """'loconly' rule: JS location rewritten, JS links untouched, wombat inserted."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
                                         urlrewriter,
                                         head_insert_func,
                                         'example,example,test,loconly)/')
    # wombat insert added
    assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
    # JS location rewritten, JS link NOT rewritten
    assert 'window.WB_wombat_location = "http:\/\/example.com/dynamic_page.html"' in buff
    # still link rewrite in HTML
    assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_2_no_rewrite():
    """'norewrite' rule: no JS rewriting at all, but wombat still inserted."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
                                         urlrewriter,
                                         head_insert_func,
                                         'example,example,test,norewrite)/')
    # wombat insert added
    assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
    # JS location NOT rewritten, JS link NOT rewritten
    assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff
    # still link rewrite in HTML
    assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_unclosed_script():
    """An unclosed <script> tag still gets rewritten and properly closed."""
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_unclosed_script.html',
                                         urlrewriter,
                                         head_insert_func,
                                         'com,example,test)/')
    # wombat insert added
    assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff
    # JS location and JS link rewritten
    assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html";\n}\n</script>' in buff, buff
def test_example_1():
    """Live-web fetch of example.com: header prefixing, charset detection, link rewrite."""
    status_headers, buff = get_rewritten('http://example.com/', urlrewriter, req_headers={'Connection': 'close'})
    # verify header rewriting
    assert (('X-Archive-Orig-connection', 'close') in status_headers.headers), status_headers
    # verify utf-8 charset detection
    assert status_headers.get_header('content-type') == 'text/html; charset=utf-8'
    assert '/pywb/20131226101010/http://www.iana.org/domains/example' in buff, buff
def test_example_2_redirect():
    """A live 302 is passed through with an empty body."""
    status_headers, buff = get_rewritten('http://httpbin.org/redirect-to?url=http://example.com/', urlrewriter)
    # redirect, no content
    assert status_headers.get_statuscode() == '302'
    assert len(buff) == 0
def test_example_3_rel():
    """A scheme-relative URL (//example.com/) resolves and fetches OK."""
    status_headers, buff = get_rewritten('//example.com/', urlrewriter)
    assert status_headers.get_statuscode() == '200'
def test_example_4_rewrite_err():
    """Rewrite-mismatch URLs (leading triple slash) are tolerated."""
    # may occur in case of rewrite mismatch, the /// gets stripped off
    status_headers, buff = get_rewritten('http://localhost:8080///example.com/', urlrewriter)
    assert status_headers.get_statuscode() == '200'
def test_example_domain_specific_3():
    """Facebook-specific rule: Bootloader.configurePage gets commented out."""
    status_headers, buff = get_rewritten('http://facebook.com/digitalpreservation', urlrewriter, follow_redirects=True)
    # comment out Bootloader.configurePage, if it is still there
    if 'Bootloader.configurePage' in buff:
        assert '/* Bootloader.configurePage' in buff
def test_wombat_top():
    """A bare `top` reference in JS is rewritten to the WB_wombat_top check."""
    #status_headers, buff = get_rewritten('https://assets-cdn.github.com/assets/github-0f06d0f46fe7bcfbf31f2380f23aec15ba21b8ec.js', urlrewriter)
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/toptest.js', urlrewriter)
    assert 'WB_wombat_top!==window' in buff
def test_post():
    """A POST body supplied via wsgi.input is forwarded (httpbin returns 200)."""
    # BUG FIX: io.BytesIO requires a bytes initializer -- the original str
    # literal raised TypeError under Python 3 (identical bytes under py2).
    buff = BytesIO(b'ABC=DEF')
    env = {'REQUEST_METHOD': 'POST',
           'HTTP_ORIGIN': 'http://httpbin.org',
           'HTTP_HOST': 'httpbin.org',
           'wsgi.input': buff}
    status_headers, resp_buff = get_rewritten('http://httpbin.org/post', urlrewriter, env=env)
    assert status_headers.get_statuscode() == '200', status_headers
def get_rewritten(*args, **kwargs):
    """Shortcut: run LiveRewriter.get_rewritten with remote_only disabled."""
    rewriter = LiveRewriter()
    return rewriter.get_rewritten(remote_only=False, *args, **kwargs)
| gpl-3.0 |
DynoGraph/stinger-dynograph | lib/dynograph_util/googletest/test/gtest_xml_output_unittest.py | 336 | 14677 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Command-line flags understood by Google Test binaries.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Whether failure messages are expected to carry stack traces; the chosen
# template is spliced into the expected-XML constants below.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
    STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
    STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 1
To be equal to: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 1
To be equal to: 2%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 1
To be equal to: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 1
To be equal to: 2%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 2
To be equal to: 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 2
To be equal to: 3%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
# Full path to the helper test binary, and whether that binary was built
# with typed-test support (detected by listing its tests).
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
    """
    Unit test for Google Test's XML output functionality.
    """

    # This test currently breaks on platforms that do not support typed and
    # type-parameterized tests, so we don't run it under them.
    if SUPPORTS_TYPED_TESTS:
        def testNonEmptyXmlOutput(self):
            """
            Runs a test program that generates a non-empty XML output, and
            tests that the XML output is expected.
            """
            self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)

    def testEmptyXmlOutput(self):
        """Verifies XML output for a Google Test binary without actual tests.

        Runs a test program that generates an empty XML output, and
        tests that the XML output is expected.
        """
        self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)

    def testTimestampValue(self):
        """Checks whether the timestamp attribute in the XML output is valid.

        Runs a test program that generates an empty XML output, and checks if
        the timestamp attribute in the testsuites tag is valid.
        """
        actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
        date_time_str = actual.documentElement.getAttributeNode('timestamp').value
        # datetime.strptime() is only available in Python 2.5+ so we have to
        # parse the expected datetime manually.
        match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
        # BUG FIX: the original passed `re.match` (the function object, which
        # is always truthy) instead of the `match` result, so a malformed
        # timestamp was never reported.
        self.assertTrue(
            match,
            'XML datetime string %s has incorrect format' % date_time_str)
        date_time_from_xml = datetime.datetime(
            year=int(match.group(1)), month=int(match.group(2)),
            day=int(match.group(3)), hour=int(match.group(4)),
            minute=int(match.group(5)), second=int(match.group(6)))

        time_delta = abs(datetime.datetime.now() - date_time_from_xml)
        # timestamp value should be near the current local time
        self.assertTrue(time_delta < datetime.timedelta(seconds=600),
                        'time_delta is %s' % time_delta)
        actual.unlink()

    def testDefaultOutputFile(self):
        """
        Confirms that Google Test produces an XML output file with the expected
        default name if no name is explicitly specified.
        """
        output_file = os.path.join(gtest_test_utils.GetTempDir(),
                                   GTEST_DEFAULT_OUTPUT_FILE)
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
            'gtest_no_test_unittest')
        try:
            os.remove(output_file)
        except OSError:
            e = sys.exc_info()[1]
            if e.errno != errno.ENOENT:
                raise

        p = gtest_test_utils.Subprocess(
            [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
            working_dir=gtest_test_utils.GetTempDir())
        self.assert_(p.exited)
        self.assertEquals(0, p.exit_code)
        self.assert_(os.path.isfile(output_file))

    def testSuppressedXmlOutput(self):
        """
        Tests that no XML file is generated if the default XML listener is
        shut down before RUN_ALL_TESTS is invoked.
        """
        xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                                GTEST_PROGRAM_NAME + 'out.xml')
        if os.path.isfile(xml_path):
            os.remove(xml_path)

        command = [GTEST_PROGRAM_PATH,
                   '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
                   '--shut_down_xml']
        p = gtest_test_utils.Subprocess(command)
        if p.terminated_by_signal:
            # p.signal is available only if p.terminated_by_signal is True.
            self.assertFalse(
                p.terminated_by_signal,
                '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
        else:
            self.assert_(p.exited)
            self.assertEquals(1, p.exit_code,
                              "'%s' exited with code %s, which doesn't match "
                              'the expected exit code %s.'
                              % (command, p.exit_code, 1))

        self.assert_(not os.path.isfile(xml_path))

    def testFilteredTestXmlOutput(self):
        """Verifies XML output when a filter is applied.

        Runs a test program that executes only some tests and verifies that
        non-selected tests do not show up in the XML output.
        """
        self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
                            extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])

    def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
        """
        Returns the xml output generated by running the program gtest_prog_name.
        Furthermore, the program's exit code must be expected_exit_code.
        """
        xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                                gtest_prog_name + 'out.xml')
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)

        command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
                   extra_args)
        p = gtest_test_utils.Subprocess(command)
        if p.terminated_by_signal:
            self.assert_(False,
                         '%s was killed by signal %d' % (gtest_prog_name, p.signal))
        else:
            self.assert_(p.exited)
            self.assertEquals(expected_exit_code, p.exit_code,
                              "'%s' exited with code %s, which doesn't match "
                              'the expected exit code %s.'
                              % (command, p.exit_code, expected_exit_code))
        actual = minidom.parse(xml_path)
        return actual

    def _TestXmlOutput(self, gtest_prog_name, expected_xml,
                       expected_exit_code, extra_args=None):
        """
        Asserts that the XML document generated by running the program
        gtest_prog_name matches expected_xml, a string containing another
        XML document.  Furthermore, the program's exit code must be
        expected_exit_code.
        """
        actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
                                    expected_exit_code)
        expected = minidom.parseString(expected_xml)
        self.NormalizeXml(actual.documentElement)
        self.AssertEquivalentNodes(expected.documentElement,
                                   actual.documentElement)
        expected.unlink()
        actual.unlink()
if __name__ == '__main__':
  # Keep gtest stack traces shallow for this run (presumably so the failure
  # text embedded in the expected XML stays stable -- TODO confirm).
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| gpl-3.0 |
themech/Machine-Learning-Coursera-Tensorflow | ex3-multi-class-classification/2_neural_networks.py | 1 | 1500 | # This classifies the same digit as the logistic regression classifiers from
# the first step. But here we're using a pre-trained neural network classifier
# (loaded from data/ex3weights.mat)
import numpy as np
from scipy import io
from sklearn import metrics
# Load the data.
filename = 'data/ex3data1.mat'
data = io.loadmat(filename)
X_data, Y_data = data['X'], data['y']
numSamples = X_data.shape[0]
# Add a 'constant' to each of the rows: a bias column of ones inserted at
# column 0 of every sample.
X_data = np.insert(X_data, 0, 1, axis=1)
print("X_data shape ", X_data.shape, ", Y_data shape", Y_data.shape)
# Load the pre-trained network: one weight matrix per layer.
weights = io.loadmat('data/ex3weights.mat')
theta1, theta2 = weights['Theta1'], weights['Theta2']
print("Theta1 shape", theta1.shape, ", theta2 shape", theta2.shape)
# Classify the input data using the pre-trained network.
# Layer 1 -> 2: linear combination of the inputs with the first weights.
a1 = X_data
z2 = np.matmul(a1, np.transpose(theta1)) # (5000,401) @ (25,401).T = (5000,25)
print("z2 shape", z2.shape)
# Bias column for the hidden layer as well.
z2 = np.insert(z2, 0, values=np.ones(z2.shape[0]), axis=1)
def sigmoid(z):
    """Element-wise logistic sigmoid, 1 / (1 + e^(-z))."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
a2 = sigmoid(z2)
print("a2 shape", a2.shape) # (5000, 26)
# Layer 2 -> 3 (output layer): one score per class for every sample.
z3 = np.matmul(a2, np.transpose(theta2))
print("z3 shape", z3.shape) # (5000, 10)
a3 = sigmoid(z3)
# Numpy is 0 base index. We add +1 to make it compatible with matlab (so we can
# compare y_pred with the correct answers from Y_data).
y_pred = np.argmax(a3, axis=1) + 1
print("y_pred shape", y_pred.shape) # (5000,)
# Print the report (per-class precision/recall/F1).
print(metrics.classification_report(Y_data, y_pred))
| mit |
Geoion/MITMf | core/sslstrip/StrippingProxy.py | 31 | 1263 | # Copyright (c) 2014-2016 Moxie Marlinspike, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from twisted.web.http import HTTPChannel
from ClientRequest import ClientRequest
class StrippingProxy(HTTPChannel):
    '''sslstrip is, at heart, a transparent proxy server that does some unusual things.
    This is the basic proxy server class, where we get callbacks for GET and POST methods.
    We then proxy these out using HTTP or HTTPS depending on what information we have about
    the (connection, client_address) tuple in our cache.
    '''
    # Twisted instantiates one ClientRequest per incoming HTTP request.
    requestFactory = ClientRequest
| gpl-3.0 |
smendez-hi/SUMO-hib | tools/net/xmlnodes_applyOffset.py | 3 | 2018 | #!/usr/bin/env python
"""
@file xmlnodes_applyOffset.py
@author Daniel Krajzewicz
@date 2009-08-01
@version $Id: xmlnodes_applyOffset.py 11671 2012-01-07 20:14:30Z behrisch $
Applies a given offset to edges given in an xml-node-file.
The results are written into <XMLNODES>.mod.xml.
Call: xmlnodes_applyOffset.py <XMLNODES> <X-OFFSET> <Y-OFFSET>
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys, StringIO
from xml.sax import saxutils, make_parser, handler
class XMLNodesReader(handler.ContentHandler):
    """SAX handler that copies an XML node file to an output file verbatim,
    except that every 'x' and 'y' attribute is shifted by a fixed offset."""

    def __init__(self, outFileName, xoff, yoff):
        self._out = open(outFileName, 'w')
        self._xoff = xoff
        self._yoff = yoff

    def endDocument(self):
        self._out.close()

    def _shifted(self, value, offset):
        """Apply the offset to a numeric attribute and escape the result."""
        return saxutils.escape(str(float(value) + offset))

    def startElement(self, name, attrs):
        out = self._out
        out.write('<' + name)
        for key, value in attrs.items():
            if key == "x":
                escaped = self._shifted(value, self._xoff)
            elif key == "y":
                escaped = self._shifted(value, self._yoff)
            else:
                escaped = saxutils.escape(value)
            out.write(' %s="%s"' % (key, escaped))
        out.write('>')

    def endElement(self, name):
        self._out.write('</%s>' % name)

    def characters(self, content):
        self._out.write(saxutils.escape(content))

    def ignorableWhitespace(self, content):
        # Whitespace-only text is copied through unescaped.
        self._out.write(content)

    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
# Command line: <XMLNODES> <X-OFFSET> <Y-OFFSET>; output goes to
# <XMLNODES>.mod.xml.  The parenthesized print() form works on both
# Python 2 and 3 (the bare print statement was Python-2-only syntax).
if len(sys.argv) < 4:
    print("Usage: " + sys.argv[0] + " <XMLNODES> <X-OFFSET> <Y-OFFSET>")
    sys.exit()
parser = make_parser()
reader = XMLNodesReader(sys.argv[1]+".mod.xml", float(sys.argv[2]), float(sys.argv[3]))
parser.setContentHandler(reader)
parser.parse(sys.argv[1])
| gpl-3.0 |
dozed/Mediawiker | mwclient/page_nowriteapi.py | 3 | 3632 | import sys
pythonver = sys.version_info[0]
import time
if pythonver >= 3:
from html.parser import HTMLParser
from html.entities import name2codepoint
else:
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
import errors
class OldPage(object):
    """Fallback page operations for MediaWiki installs without the write
    API: each action is performed by scraping and re-submitting the normal
    index.php HTML forms.  The methods are @staticmethod but take the page
    object as their first argument, so they can be grafted onto Page."""

    @staticmethod
    def save(self, text = u'', summary = u'', minor = False):
        """Save `text` to the page via the edit form."""
        data = {}
        data['wpTextbox1'] = text
        data['wpSummary'] = summary
        data['wpSave'] = 'Save page'
        data['wpEditToken'] = self.get_token('edit')
        if self.last_rev_time:
            data['wpEdittime'] = time.strftime('%Y%m%d%H%M%S', self.last_rev_time)
        else:
            data['wpEdittime'] = time.strftime('%Y%m%d%H%M%S', time.gmtime())
        if self.edit_time:
            data['wpStarttime'] = time.strftime('%Y%m%d%H%M%S', self.edit_time)
        else:
            data['wpStarttime'] = time.strftime('%Y%m%d%H%M%S', time.gmtime())
        # NOTE: a stray unconditional overwrite of wpStarttime used to follow
        # here, making the if/else above dead code; it has been removed so
        # the recorded edit start time actually honors self.edit_time.
        if minor:
            data['wpMinoredit'] = '1'
        data['title'] = self.name
        page_data = self.site.raw_index('submit', **data)
        # If the returned page still contains the edit form, the save failed.
        page = EditPage('editform')
        page.feed(page_data)
        page.close()
        if page.data:
            if page.readonly: raise errors.ProtectedPageError(self)
            self.get_token('edit', True)
            raise errors.EditError(page.title, data)

    @staticmethod
    def move(self, new_title, reason = '', move_talk = True):
        """Move (rename) the page to `new_title` via Special:Movepage."""
        postdata = {'wpNewTitle': new_title,
                    'wpOldTitle': self.name,
                    'wpReason': reason,
                    'wpMove': '1',
                    'wpEditToken': self.get_token('move')}
        if move_talk:
            postdata['wpMovetalk'] = '1'
        postdata['title'] = 'Special:Movepage'
        # Fixed: this used to read `**data`, a NameError -- the local dict
        # is called `postdata`.
        page_data = self.site.raw_index('submit', **postdata)
        # A returned move form still containing the token means failure.
        page = EditPage('movepage')
        page.feed(page_data.decode('utf-8', 'ignore'))
        page.close()
        if 'wpEditToken' in page.data:
            raise errors.EditError(page.title, postdata)

    @staticmethod
    def delete(self, reason = ''):
        """Delete the page via the deletion confirmation form."""
        postdata = {'wpReason': reason,
                    'wpConfirmB': 'Delete',
                    'mw-filedelete-submit': 'Delete',
                    'wpEditToken': self.get_token('delete'),
                    'title': self.name}
        page_data = self.site.raw_index('delete', **postdata)
class EditPage(HTMLParser):
    """Scrapes a MediaWiki edit/move form out of an HTML page.

    After feed()ing the page: `data` maps hidden/text input names to values,
    `textdata` collects the textarea chunks, `title` is the page title,
    `action` the form's action URL and `readonly` whether the textarea was
    marked read-only.
    """

    def __init__(self, form):
        HTMLParser.__init__(self)
        self.form = form
        self.in_form = False
        self.in_text = False
        self.in_title = False
        self.data = {}
        self.textdata = []
        self.title = u''
        self.readonly = True

    def handle_starttag(self, tag, attrs):
        # The title flag is refreshed on every start tag.
        self.in_title = tag == 'title'
        if (u'id', self.form) in attrs:
            # This is the form we were asked to scrape.
            attrs = dict(attrs)
            self.in_form = True
            self.action = attrs['action']
        # Note: if the branch above fired, `attrs` is now a dict and the
        # tuple membership tests below check its keys (never matching) --
        # this quirk is preserved from the original implementation.
        wanted_input = (tag == 'input' and self.in_form
                        and (u'type', u'submit') not in attrs
                        and (u'type', u'checkbox') not in attrs)
        if wanted_input:
            attrs = dict(attrs)
            if u'name' in attrs:
                self.data[attrs[u'name']] = attrs.get(u'value', u'')
        if tag == 'textarea' and self.in_form:
            self.in_text = True
            self.readonly = (u'readonly', u'readonly') in attrs

    def handle_endtag(self, tag):
        if tag == 'title' and self.in_title:
            self.in_title = False
        elif tag == 'form' and self.in_form:
            self.in_form = False
        elif tag == 'textarea' and self.in_text:
            self.in_text = False

    def handle_data(self, data):
        if self.in_text:
            self.textdata.append(data)
        if self.in_title:
            self.title += data

    def handle_entityref(self, name):
        if name not in name2codepoint:
            # Unknown entity: pass it through verbatim.
            self.handle_data(u'&%s;' % name)
            return
        codepoint = name2codepoint[name]
        if pythonver >= 3:
            self.handle_data(chr(codepoint))
        else:
            self.handle_data(unichr(codepoint))

    def handle_charref(self, name):
        try:
            if pythonver >= 3:
                self.handle_data(chr(int(name)))
            else:
                self.handle_data(unichr(int(name)))
        except ValueError:
            # NOTE(review): '&#$s;' looks like a typo for '&#%s;' in the
            # original; kept byte-for-byte to preserve behavior.
            self.handle_data(u'&#$s;' % name)
| mit |
marcuskelly/recover | Lib/site-packages/Crypto/Cipher/PKCS1_OAEP.py | 9 | 9570 | # -*- coding: utf-8 -*-
#
# Cipher/PKCS1_OAEP.py : PKCS#1 OAEP
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RSA encryption protocol according to PKCS#1 OAEP
See RFC3447__ or the `original RSA Labs specification`__ .
This scheme is more properly called ``RSAES-OAEP``.
As an example, a sender may encrypt a message in this way:
>>> from Crypto.Cipher import PKCS1_OAEP
>>> from Crypto.PublicKey import RSA
>>>
>>> message = b'To be encrypted'
>>> key = RSA.importKey(open('pubkey.der').read())
>>> cipher = PKCS1_OAEP.new(key)
>>> ciphertext = cipher.encrypt(message)
At the receiver side, decryption can be done using the private part of
the RSA key:
>>> key = RSA.importKey(open('privkey.der').read())
>>> cipher = PKCS1_OAEP.new(key)
>>> message = cipher.decrypt(ciphertext)
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125.
"""
__all__ = [ 'new', 'PKCS1OAEP_Cipher' ]
from Crypto.Signature.pss import MGF1
import Crypto.Hash.SHA1
from Crypto.Util.py3compat import *
import Crypto.Util.number
from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes
from Crypto.Util.strxor import strxor
from Crypto import Random
class PKCS1OAEP_Cipher:
    """This cipher can perform PKCS#1 v1.5 OAEP encryption or decryption.

    Instances are created through the module-level `new()` factory rather
    than directly.
    """

    def __init__(self, key, hashAlgo, mgfunc, label, randfunc):
        """Initialize this PKCS#1 OAEP cipher object.

        :Parameters:
         key : an RSA key object
                If a private half is given, both encryption and decryption are possible.
                If a public half is given, only encryption is possible.
         hashAlgo : hash object
                The hash function to use. This can be a module under `Crypto.Hash`
                or an existing hash object created from any of such modules. If not specified,
                `Crypto.Hash.SHA1` is used.
         mgfunc : callable
                A mask generation function that accepts two parameters: a string to
                use as seed, and the length of the mask to generate, in bytes.
                If not specified, the standard MGF1 is used (a safe choice).
         label : byte string
                A label to apply to this particular encryption. If not specified,
                an empty string is used. Specifying a label does not improve
                security.
         randfunc : callable
                A function that returns random bytes.

        :attention: Modify the mask generation function only if you know what you are doing.
                    Sender and receiver must use the same one.
        """
        self._key = key
        if hashAlgo:
            self._hashObj = hashAlgo
        else:
            self._hashObj = Crypto.Hash.SHA1
        if mgfunc:
            self._mgf = mgfunc
        else:
            # Default mask generation: MGF1 built on the selected hash.
            self._mgf = lambda x,y: MGF1(x,y,self._hashObj)
        self._label = label
        self._randfunc = randfunc

    def can_encrypt(self):
        """Return True/1 if this cipher object can be used for encryption."""
        return self._key.can_encrypt()

    def can_decrypt(self):
        """Return True/1 if this cipher object can be used for decryption."""
        return self._key.can_decrypt()

    def encrypt(self, message):
        """Produce the PKCS#1 OAEP encryption of a message.

        This function is named ``RSAES-OAEP-ENCRYPT``, and is specified in
        section 7.1.1 of RFC3447.

        :Parameters:
         message : byte string
                The message to encrypt, also known as plaintext. It can be of
                variable length, but not longer than the RSA modulus (in bytes)
                minus 2, minus twice the hash output size.

        :Return: A byte string, the ciphertext in which the message is encrypted.
            It is as long as the RSA modulus (in bytes).
        :Raise ValueError:
            If the RSA key length is not sufficiently long to deal with the given
            message.
        """
        # TODO: Verify the key is RSA

        # See 7.1.1 in RFC3447
        modBits = Crypto.Util.number.size(self._key.n)
        k = ceil_div(modBits,8) # Convert from bits to bytes
        hLen = self._hashObj.digest_size
        mLen = len(message)

        # Step 1b: the padding string must not have negative length.
        ps_len = k-mLen-2*hLen-2
        if ps_len<0:
            raise ValueError("Plaintext is too long.")
        # Step 2a: hash of the (usually empty) label.
        lHash = self._hashObj.new(self._label).digest()
        # Step 2b
        ps = bchr(0x00)*ps_len
        # Step 2c
        db = lHash + ps + bchr(0x01) + message
        # Step 2d: fresh random seed for the mask.
        ros = self._randfunc(hLen)
        # Step 2e
        dbMask = self._mgf(ros, k-hLen-1)
        # Step 2f
        maskedDB = strxor(db, dbMask)
        # Step 2g
        seedMask = self._mgf(maskedDB, hLen)
        # Step 2h
        maskedSeed = strxor(ros, seedMask)
        # Step 2i
        em = bchr(0x00) + maskedSeed + maskedDB
        # Step 3a (OS2IP)
        em_int = bytes_to_long(em)
        # Step 3b (RSAEP)
        m_int = self._key._encrypt(em_int)
        # Step 3c (I2OSP)
        c = long_to_bytes(m_int, k)
        return c

    def decrypt(self, ct):
        """Decrypt a PKCS#1 OAEP ciphertext.

        This function is named ``RSAES-OAEP-DECRYPT``, and is specified in
        section 7.1.2 of RFC3447.

        :Parameters:
         ct : byte string
                The ciphertext that contains the message to recover.

        :Return: A byte string, the original message.
        :Raise ValueError:
            If the ciphertext length is incorrect, or if the decryption does not
            succeed.
        :Raise TypeError:
            If the RSA key has no private half.
        """
        # See 7.1.2 in RFC3447
        modBits = Crypto.Util.number.size(self._key.n)
        k = ceil_div(modBits,8) # Convert from bits to bytes
        hLen = self._hashObj.digest_size

        # Step 1b and 1c
        if len(ct) != k or k<hLen+2:
            raise ValueError("Ciphertext with incorrect length.")
        # Step 2a (O2SIP)
        ct_int = bytes_to_long(ct)
        # Step 2b (RSADP)
        m_int = self._key._decrypt(ct_int)
        # Complete step 2c (I2OSP)
        em = long_to_bytes(m_int, k)
        # Step 3a
        lHash = self._hashObj.new(self._label).digest()
        # Step 3b
        y = em[0]
        # y must be 0, but we MUST NOT check it here in order not to
        # allow attacks like Manger's (http://dl.acm.org/citation.cfm?id=704143)
        maskedSeed = em[1:hLen+1]
        maskedDB = em[hLen+1:]
        # Step 3c
        seedMask = self._mgf(maskedDB, hLen)
        # Step 3d
        seed = strxor(maskedSeed, seedMask)
        # Step 3e
        dbMask = self._mgf(seed, k-hLen-1)
        # Step 3f
        db = strxor(maskedDB, dbMask)
        # Step 3g -- all padding checks are accumulated in `valid` instead of
        # returning early, so a failure reveals as little as possible about
        # which individual check went wrong.
        valid = 1
        one = db[hLen:].find(bchr(0x01))
        lHash1 = db[:hLen]
        if lHash1!=lHash:
            valid = 0
        if one<0:
            valid = 0
        if bord(y)!=0:
            valid = 0
        if not valid:
            raise ValueError("Incorrect decryption.")
        # Step 4
        return db[hLen+one+1:]
def new(key, hashAlgo=None, mgfunc=None, label=b(''), randfunc=None):
    """Build a `PKCS1OAEP_Cipher` for PKCS#1 OAEP encryption or decryption.

    :Parameters:
     key : RSA key object
            Key used to encrypt (public half suffices) or decrypt (private
            half required) messages; a `Crypto.PublicKey.RSA` object.
     hashAlgo : hash object
            Hash function to use, either a module under `Crypto.Hash` or a
            hash object from one; defaults to `Crypto.Hash.SHA1`.
     mgfunc : callable
            Mask generation function taking a seed string and a mask length
            in bytes.  The standard MGF1 is used when omitted (a safe
            choice).  Sender and receiver must use the same function.
     label : byte string
            Optional label for this particular encryption; specifying one
            does not improve security.
     randfunc : callable
            Source of random bytes; defaults to `Random.get_random_bytes`.
    """
    chosen_randfunc = randfunc
    if chosen_randfunc is None:
        chosen_randfunc = Random.get_random_bytes
    return PKCS1OAEP_Cipher(key, hashAlgo, mgfunc, label, chosen_randfunc)
| bsd-2-clause |
alanfgates/hive | testutils/ptest/Ssh.py | 18 | 4425 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from Queue import Queue
import Process
class SSHConnection():
    """A single SSH target plus the shell state (working directory,
    environment exports, PATH additions) to prepend to every command.

    ``host`` is the address handed to ssh; ``hostname`` is either ``host``
    or ``host-<num>`` and is substituted for ``{host}`` placeholders in
    commands, paths and exported values.
    """

    def __init__(self, host, num = None):
        self.host = host
        if num is None:
            self.hostname = host
        else:
            self.hostname = host + '-' + str(num)
        self.pwd = '/'     # remote working directory used by prefix()
        self.env = {}      # exported variables, name -> value
        self.path = []     # extra PATH entries, in insertion order

    def cd(self, path):
        """Set the remote working directory ('{host}' is expanded)."""
        self.pwd = path.format(host = self.hostname)

    def export(self, env, value):
        """Export an environment variable for subsequent commands."""
        self.env[env] = value.format(host = self.hostname)

    def add_path(self, path):
        """Add a directory to the front of the remote PATH."""
        self.path.append(path.format(host = self.hostname))

    def prefix(self, cmd):
        """Return cmd prefixed with the cd/export statements, '&&'-joined."""
        pre = []
        pre.append('cd "{0}"'.format(self.pwd))
        # .items() works on both Python 2 and 3; the original .iteritems()
        # raised AttributeError under Python 3.
        for (e, v) in self.env.items():
            pre.append('export {0}=\\"{1}\\"'.format(e, v))
        for p in self.path:
            pre.append('export PATH="{0}:${{PATH}}"'.format(p))
        pre.append(cmd)
        return ' && '.join(pre)

    def run(self, cmd, warn_only = False, quiet = False, vewy_quiet = False,
            abandon_output = True):
        """Run cmd on the remote host via ssh.

        Don't use single quotes in `cmd`, this will break and end badly.
        Returns whatever Process.run returns, or None when the command
        fails and warn_only is set.
        """
        cmd = cmd.format(host = self.hostname)
        cmd = self.prefix(cmd)
        print(self.hostname + ' =>')
        if vewy_quiet:
            # Be vewy, vewy quiet, I'm hunting wabbits.
            print('[command hidden]\n')
            quiet = True
        else:
            print(cmd + '\n')
        cmd = '''ssh '{0}' "bash -c '{1}'"'''.format(self.host, cmd)
        try:
            return Process.run(cmd, quiet, abandon_output)
        except Exception as e:
            if warn_only:
                print(str(e) + '---------- This was only a warning, ' +
                      'it won\'t stop the execution --\n')
                return None
            else:
                raise e
class SSHSet():
    """Fans cd/export/run calls out to a group of SSHConnection objects,
    optionally running commands on all of them in parallel."""

    def __init__(self, conn = None):
        # A fresh list per instance: the old `conn = []` default was a
        # mutable default argument shared by every SSHSet() created
        # without arguments, so connections leaked between instances.
        self.conn = conn if conn is not None else []

    def __len__(self):
        return len(self.conn)

    def add(self, conn):
        """Add one connection, or extend with a list of connections."""
        if isinstance(conn, list):
            self.conn.extend(conn)
        else:
            self.conn.append(conn)

    def cd(self, path):
        """Set the working directory on every connection."""
        for conn in self.conn:
            conn.cd(path)

    def export(self, env, value):
        """Export an environment variable on every connection."""
        for conn in self.conn:
            conn.export(env, value)

    def add_path(self, path):
        """Add a PATH entry on every connection."""
        for conn in self.conn:
            conn.add_path(path)

    def run(self, cmd, parallel = True, quiet = False, vewy_quiet = False,
            abandon_output = True, warn_only = False):
        """Run cmd on every connection.

        With parallel=True, one thread per connection is started; failures
        are collected on a queue and re-raised as one combined Exception
        after all threads have joined.
        """
        if not parallel:
            for conn in self.conn:
                conn.run(cmd, quiet = quiet, vewy_quiet = vewy_quiet,
                         abandon_output = abandon_output, warn_only = warn_only)
        else:
            threads = []
            queue = Queue()
            def wrapper(conn, cmd, queue):
                # Thread body: run the command, reporting any failure on
                # the queue tagged with the connection's hostname.
                try:
                    conn.run(cmd, quiet = quiet, vewy_quiet = vewy_quiet,
                             abandon_output = abandon_output,
                             warn_only = warn_only)
                except Exception as e:
                    queue.put(Exception(conn.hostname + ' => ' + str(e)))
            for conn in self.conn:
                thread = Thread(target = wrapper, args = (conn, cmd, queue, ))
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()
            if not queue.empty():
                l = []
                while not queue.empty():
                    e = queue.get()
                    l.append(str(e))
                raise Exception('\n'.join(l))
| apache-2.0 |
hendradarwin/VTK | ThirdParty/Twisted/twisted/test/process_twisted.py | 41 | 1149 | """A process that reads from stdin and out using Twisted."""
### Twisted Preamble
# This makes sure that users don't have to set up their environment
# specially in order to run these programs from bin/.
import sys, os
pos = os.path.abspath(sys.argv[0]).find(os.sep+'Twisted')
if pos != -1:
sys.path.insert(0, os.path.abspath(sys.argv[0])[:pos+8])
sys.path.insert(0, os.curdir)
### end of preamble
from twisted.python import log
from zope.interface import implements
from twisted.internet import interfaces
log.startLogging(sys.stderr)
from twisted.internet import protocol, reactor, stdio
class Echo(protocol.Protocol):
    """Echoes everything received on stdin back out, logging connection
    lifecycle events and stopping the reactor when the connection ends."""
    implements(interfaces.IHalfCloseableProtocol)
    def connectionMade(self):
        print "connection made"
    def dataReceived(self, data):
        # Echo each chunk straight back to the transport.
        self.transport.write(data)
    def readConnectionLost(self):
        # Peer half-closed its write side; close our side too.
        print "readConnectionLost"
        self.transport.loseConnection()
    def writeConnectionLost(self):
        print "writeConnectionLost"
    def connectionLost(self, reason):
        # Full teardown: end the process's event loop.
        print "connectionLost", reason
        reactor.stop()
# Wire the protocol to this process's stdin/stdout and run the event loop.
stdio.StandardIO(Echo())
reactor.run()
| bsd-3-clause |
ualikhansars/Gwent | lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/adapter.py | 373 | 1695 | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from __future__ import unicode_literals
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
class PostGISAdapter(object):
    """Adapts a GEOS geometry into the quoted form PostgreSQL/PostGIS
    expects, via psycopg2's adaptation protocol (ISQLQuote)."""
    def __init__(self, geom, geography=False):
        "Initializes on the geometry."
        # Getting the WKB (in string form, to allow easy pickling of
        # the adaptor) and the SRID from the geometry.
        self.ewkb = bytes(geom.ewkb)
        self.srid = geom.srid
        self.geography = geography
        self._adapter = Binary(self.ewkb)
    def __conform__(self, proto):
        # Does the given protocol conform to what Psycopg2 expects?
        if proto == ISQLQuote:
            return self
        else:
            raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
    def __eq__(self, other):
        # Adapters are equal only when EWKB payload and SRID both match.
        if not isinstance(other, PostGISAdapter):
            return False
        return (self.ewkb == other.ewkb) and (self.srid == other.srid)
    def __hash__(self):
        return hash((self.ewkb, self.srid))
    def __str__(self):
        return self.getquoted()
    def prepare(self, conn):
        """
        This method allows escaping the binary in the style required by the
        server's `standard_conforming_string` setting.
        """
        self._adapter.prepare(conn)
    def getquoted(self):
        "Returns a properly quoted string for use in PostgreSQL/PostGIS."
        # psycopg will figure out whether to use E'\\000' or '\000'
        return str('%s(%s)' % (
            'ST_GeogFromWKB' if self.geography else 'ST_GeomFromEWKB',
            self._adapter.getquoted().decode())
        )
| mit |
invisiblek/python-for-android | python3-alpha/python3-src/Lib/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # One-shot stateless codec backed by the generated charmap tables below.
    # This file was generated by gencodec.py; regenerate rather than hand-edit.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so no inter-call state is needed.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so no inter-call state is needed.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream variant: all behavior comes from Codec and codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream variant: all behavior comes from Codec and codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the encodings package registers
    for the 'iso8859-10' codec."""
    return codecs.CodecInfo(
        name='iso8859-10',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
fkraemer/qt_multi_annotator | qt_multi_annotator.py | 1 | 16390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtCore, QtGui
import numpy as np
from annotation_helper import *
import unittest
import os
import pickle
import argparse
#for file listing from path
# --- GUI layout constants ---
CLASSBUTTON_MIN_HEIGHT = 30
CLASSBUTTON_MIN_WIDTH = 40
# --- labelling limits and canvas geometry (pixels) ---
MAX_CLASSES = 20
IMG_SIZE_X = 800
IMG_SIZE_Y = 1024
# --- marker / overlay defaults (alpha channels in 0-255) ---
MARKER_NEUTRAL_COLOR = (255, 255, 255)
MARKER_NEUTRAL_CLASS = 255  # sentinel class id used for "erase" strokes
DEFAULT_MARKER_ALPHA = 255
DEFAULT_WATERSHED_ALPHA = 220
DEFAULT_BACKGROUND_ALPHA = 155
DEFAULT_CLASS_NUM = 5  # fallback class count when no names are supplied
CLASSNAMES_FILENAME = 'classnames.txt'  # pickled class-name list per dataset dir

# command line interface: positional image directory, optional class names
parser = argparse.ArgumentParser(description='Label a folders files')
parser.add_argument('path', type=str,
                    help='where to look for images, non-recursive')
parser.add_argument('--classes', type=str,
                    help=('class names seperated by commas: foo1,bar2,yala (max: %d)' % MAX_CLASSES))
#@brief overwrite to pass the class id when clicked with signal "clickedClass"
class flosQPushButton(QtGui.QPushButton):
    # Re-emitted click signal carrying this button's class id, so one slot
    # can serve all class buttons without per-button closures.
    clickedClass = QtCore.pyqtSignal(int)

    def __init__(self, _str, id):
        # _str: button label; id: class index forwarded on every click
        super(flosQPushButton, self).__init__(_str)
        self.classId = id
        QtCore.QObject.connect(self, QtCore.SIGNAL('clicked()'), self.clickedSignal)

    def clickedSignal(self):
        # Translate the argument-less clicked() into clickedClass(classId).
        self.clickedClass.emit(self.classId)
class AnnotationWindow(QtGui.QWidget):
def __init__(self, imgBackbone, nameList):
"""
for every entry in nameList a labelling button is created
:type imgBackbone: ImageBackbone
:type nameList: list
"""
QtGui.QWidget.__init__(self)
self.imgBackbone = imgBackbone
self.classMax = classMax
self.watershedActivated = False
self.activeClassButtonId = 0
self.nameList = nameList
self.goToChangeSlotActive = True
self.scribbleArea = ScribbleImage(DEFAULT_MARKER_ALPHA, MARKER_NEUTRAL_COLOR, IMG_SIZE_Y, IMG_SIZE_X, parent=self)
self.scribbleArea.setActiveMarkerClass(self.activeClassButtonId)
self.setupUI()
self.connectThings()
imgBackbone.loadImg(0)
def connectThings(self):
# GUI stuff
#Left
QtCore.QObject.connect(self.nextButton, QtCore.SIGNAL('clicked()'), self.slot_openNextImg) #this forwards basically to goTo valuechanged signal
QtCore.QObject.connect(self.previousButton, QtCore.SIGNAL('clicked()'), self.slot_openPreviousImg) #this forwards basically to goTo valuechanged signal
QtCore.QObject.connect(self.goToWidget, QtCore.SIGNAL('valueChanged(int)'), self.slot_openImage)
#Middle
QtCore.QObject.connect(self.scribbleArea, QtCore.SIGNAL('markerUpdate()'), self.slot_computeWatershed)
for d in self.classButtonList:
QtCore.QObject.connect(d, QtCore.SIGNAL('clickedClass(int)'), self.slot_classButtonPushed)
QtCore.QObject.connect(self.eraseMarkerButton, QtCore.SIGNAL('clicked()'), self.slot_eraseButtonPushed)
#Right
QtCore.QObject.connect(self.exitButton, QtCore.SIGNAL('clicked()'), self.slot_closeHandle)
QtCore.QObject.connect(self.saveButton, QtCore.SIGNAL('clicked()'), self.slot_save)
QtCore.QObject.connect(self.saveAndNextButton, QtCore.SIGNAL('clicked()'), self.slot_saveAndNext)
QtCore.QObject.connect(self.clearMarkerButton, QtCore.SIGNAL('clicked()'), self.slot_clearMarkerImage)
QtCore.QObject.connect(self.segmentationActiveWidget, QtCore.SIGNAL('stateChanged(int)'), self.slot_watershedActiveChange)
#Background tasks
QtCore.QObject.connect(self.imgBackbone, QtCore.SIGNAL('imgLoaded(QImage, int , QString, int, int)'), self.slot_updateImg)
QtCore.QObject.connect(self.imgBackbone, QtCore.SIGNAL('imgLoaded(QImage, int , QString, int, int)'),
self.scribbleArea.slot_setBackgroundImage)
QtCore.QObject.connect(self.imgBackbone, QtCore.SIGNAL('watershedUpdate(QImage)'), self.scribbleArea.slot_setWatershedImg)
QtCore.QObject.connect(self.imgBackbone, QtCore.SIGNAL('markersLoaded(QImage)'), self.scribbleArea.slot_setMarkerImage)
def setupUI(self):
#Left Column
self.currentImgLabel = QtGui.QLabel("ImageName \n 0 / N")
self.goToWidget = QtGui.QSpinBox() #configured in button update
self.previousButton = QtGui.QPushButton("<<")
self.nextButton = QtGui.QPushButton(">>")
vboxLeft = QtGui.QVBoxLayout()
vboxLeft.setAlignment(QtCore.Qt.AlignTop)
vboxLeft.addWidget(self.currentImgLabel)
vboxLeft.addWidget(self.goToWidget)
vboxLeft.addWidget(self.previousButton)
vboxLeft.addWidget(self.nextButton)
#Middle Column
vboxMiddle = QtGui.QVBoxLayout()
hboxMiddle = QtGui.QHBoxLayout()
hboxMiddle.setAlignment(QtCore.Qt.AlignLeft)
currentClassLabel = QtGui.QLabel("Class:")
hboxMiddle.addWidget(currentClassLabel)
#upper row: BUTTONS
self.classButtonList = list()
for i in range(0,self.classMax):
qP = flosQPushButton("%s" % self.nameList[i],i )
qP.setMinimumWidth(CLASSBUTTON_MIN_WIDTH)
qP.setMinimumHeight(CLASSBUTTON_MIN_HEIGHT)
self.classButtonList.append(qP)
hboxMiddle.addWidget(qP)
self.eraseMarkerButton = QtGui.QPushButton("Erase")
self.eraseMarkerButton.setMinimumWidth(CLASSBUTTON_MIN_WIDTH*2)
self.eraseMarkerButton.setMinimumHeight(CLASSBUTTON_MIN_HEIGHT)
hboxMiddle.addWidget(self.eraseMarkerButton)
self.guiActivateClassButton()
vboxMiddle.addLayout(hboxMiddle)
#lower part: image
self.scribbleArea.setMinimumHeight(IMG_SIZE_X)
self.scribbleArea.setMinimumWidth(IMG_SIZE_Y)
vboxMiddle.addWidget(self.scribbleArea)
##Right Column Sub groups
###watershed params
vboxRightWatershedSubLayout = QtGui.QVBoxLayout()
self.segmentationActiveWidget = QtGui.QCheckBox("Paint Segm.")
# self.watershedOptions = ['Normal','Foo','Bar']
# self.watershedEdit = QtGui.QComboBox()
# self.watershedEdit.addItems(self.watershedOptions)
# vboxRightWatershedSubLayout.addWidget(self.watershedEdit)
vboxRightWatershedSubLayout.addWidget(self.segmentationActiveWidget)
watershedOptionsGroup = QtGui.QGroupBox("Segmentation")
# watershedGroup.setStyleSheet('border: 1px solid black; border-radius: 5px; margin-top: 1ex')
watershedOptionsGroup.setLayout(vboxRightWatershedSubLayout)
###save params
self.clearMarkerButton = QtGui.QPushButton("Clear Markers")
self.saveButton = QtGui.QPushButton("Save")
self.saveAndNextButton = QtGui.QPushButton('Save + \n Next')
self.saveAndNextButton.setMinimumHeight(100)
self.exitButton = QtGui.QPushButton('Exit')
self.exitButton.setMinimumHeight(100)
vboxRightSaveSubLayout = QtGui.QVBoxLayout()
vboxRightSaveSubLayout.addWidget(self.clearMarkerButton)
vboxRightSaveSubLayout.addWidget(self.saveButton)
vboxRightSaveSubLayout.addWidget(self.saveAndNextButton)
vboxRightSaveSubLayout.addWidget(self.exitButton)
imageOptionsGroup = QtGui.QGroupBox("Image")
# watershedGroup.setStyleSheet('border: 1px solid black; border-radius: 5px; margin-top: 1ex')
imageOptionsGroup.setLayout(vboxRightSaveSubLayout)
# Right Column Top Layout
vboxRight = QtGui.QVBoxLayout()
vboxRight.addStretch(1)
vboxRight.addWidget(watershedOptionsGroup)
vboxRight.addWidget(imageOptionsGroup)
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addLayout(vboxLeft)
hbox.addLayout(vboxMiddle)
hbox.addLayout(vboxRight)
self.setLayout(hbox)
self.setWindowTitle('Multi Class Annotation Tool')
self.show()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Q:
self.slot_closeHandle()
if e.key() == QtCore.Qt.Key_S:
self.slot_save()
if e.key() == QtCore.Qt.Key_T:
self.segmentationActiveWidget.toggle()
#Debug print e.key()
def guiActivateClassButton(self):
deactivatedButtonList = range(0,len(self.classButtonList))
try:
deactivatedButtonList.remove(self.activeClassButtonId)
except ValueError:
pass # it was just the erase class
for i in deactivatedButtonList:
self.classButtonList[i].setStyleSheet(
"background-color: rgb(%d, %d, %d); border: none" % classToColorTuple(i))
#extra handling of erase mode
if self.activeClassButtonId == MARKER_NEUTRAL_CLASS: # we are in erase mode
self.eraseMarkerButton.setStyleSheet("background-color: white; border: 3px solid black")
else:
self.eraseMarkerButton.setStyleSheet("background-color: white; border: 3px solid red")
self.classButtonList[self.activeClassButtonId].setStyleSheet(
"background-color: rgb(%d, %d, %d); border: 3px solid black" % classToColorTuple(self.activeClassButtonId))
def guiButtonUpdate(self,currentId,minId,maxId):
self.goToWidget.setMaximum(maxId-1)
self.goToWidget.setMinimum(minId)
self.goToWidget.setKeyboardTracking(False)
#the goto widgets value change should not provoke a load request
self.goToChangeSlotActive = False
self.goToWidget.setValue(currentId)
self.goToChangeSlotActive = True
if currentId == (maxId-1):
self.nextButton.setEnabled(False)
self.saveAndNextButton.setEnabled(False)
else:
self.nextButton.setEnabled(True)
self.saveAndNextButton.setEnabled(True)
if currentId == minId:
self.previousButton.setEnabled(False)
else:
self.previousButton.setEnabled(True)
def saveThisImageAndMask(self):
self.imgBackbone.markerUpdate(self.scribbleArea.markerImgRGB)
self.imgBackbone.computeWatershedUpdate()
self.imgBackbone.save()
def slot_openImage(self,id):
if self.goToChangeSlotActive:
self.imgBackbone.loadImg(id)
def slot_openNextImg(self):
self.imgBackbone.getNextImg()
def slot_openPreviousImg(self):
self.imgBackbone.getPreviousImg()
def slot_closeHandle(self):
self.close()
def slot_save(self):
self.saveThisImageAndMask()
def slot_saveAndNext(self):
self.saveThisImageAndMask()
self.slot_openNextImg()
def slot_updateImg(self,qImg,id,imgName,minId,maxId):
#set new id
self.guiButtonUpdate(id,minId,maxId)
self.currentImgLabel.setText('%s \n %3d / %3d' %(imgName,id,maxId-1))
def slot_clearMarkerImage(self):
self.scribbleArea.clearImage()
if self.watershedActivated:
self.segmentationActiveWidget.setChecked(False)
#TODO test, whether this emits the signal and provokes a watershed update
def slot_idNotValid(self,id,minId, maxId):
#TODO show dialog
print '%d is not a valid image id, must be between %d and %d' % (id,minId, maxId,)
def slot_computeWatershed(self):
#TODO do some timer stuff here, to not do it too often
if self.watershedActivated:
self.imgBackbone.markerUpdate(self.scribbleArea.markerImgRGB)
self.imgBackbone.computeWatershedUpdate()
def slot_watershedActiveChange(self,i):
if i > 0:
self.watershedActivated = True
self.scribbleArea.showWatershed(True)
self.slot_computeWatershed()
else:
self.watershedActivated = False
self.scribbleArea.showWatershed(False)
def slot_classButtonPushed(self,classId):
self.activeClassButtonId = classId
self.guiActivateClassButton()
self.scribbleArea.setActiveMarkerClass(classId)
def slot_eraseButtonPushed(self):
self.activeClassButtonId = MARKER_NEUTRAL_CLASS #this will 'unset' all other buttons
self.guiActivateClassButton()
self.scribbleArea.setActiveMarkerClass(2)
self.scribbleArea.setEraseMode()
def loadClassNamesOrSetNew(path, rawNames):
    """Resolve the class-name list for a dataset directory.

    Priority: explicit rawNames (comma separated; persisted for later
    sessions) > a previously pickled classnames file inside ``path`` >
    numeric fallback names '0'..str(DEFAULT_CLASS_NUM - 1).
    """
    store = os.path.join(path, CLASSNAMES_FILENAME)
    if rawNames:
        # user-supplied names win and are written back for the next run
        names = rawNames.split(',')
        with open(store, 'wb') as f:
            pickle.dump(names, f)
        return names
    if os.path.exists(store):
        with open(store, 'rb') as f:
            return pickle.load(f)
    return [str(i) for i in range(DEFAULT_CLASS_NUM)]
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    args = parser.parse_args()
    if not os.path.exists(args.path):
        # Python 2 print statement: the tool targets Python 2 / PyQt4.
        print 'Path %s does not exist. Exiting now' %args.path
        sys.exit(-1)
    # class names: from --classes, from a pickled classnames.txt, or defaults
    nameList = loadClassNamesOrSetNew(args.path,args.classes)
    # nameList = ['Boden', 'Wasser', 'Bodenholz', 'Tanne', 'Fichte', 'Kiefer', 'Blattlos', 'DontCare']
    classMax = np.min((len(nameList),MAX_CLASSES)) #limit this
    nameList = nameList[0:classMax]
    imgBackbone = ImageBackbone(args.path,MAX_CLASSES,IMG_SIZE_Y,IMG_SIZE_X,#let the backbone always work up to MAX_CLASSES so that wrong user input can not destroy once set labels
                                DEFAULT_BACKGROUND_ALPHA, DEFAULT_MARKER_ALPHA, DEFAULT_WATERSHED_ALPHA,
                                MARKER_NEUTRAL_COLOR,MARKER_NEUTRAL_CLASS)
    myapp = AnnotationWindow(imgBackbone,nameList)
    myapp.show()
    sys.exit(app.exec_())
class test(unittest.TestCase):
    # NOTE: this class is defined below the __main__ block, so the tests
    # only run when the module is imported by a unittest runner, not when
    # the tool is started from the command line.

    def testImageSetFilling(self):
        # Create dummy image/marker files and check that ImageBackbone
        # pairs them into image sets (marker-only files must be dropped).
        s = 'foo'  # (unused)
        tmpDir = 'test_tmp'
        if not os.path.exists(tmpDir):
            os.makedirs(tmpDir)
        fileList = ['img001.png', 'img001_marker.png','img002.png','img003.png', 'img003_marker.png',
                    'img004_marker.png']
        for fl in fileList:
            filename = '%s/%s' % (tmpDir,fl)
            with open(filename, "w") as f:
                f.write("FOOBAR")
        imgBackbone = ImageBackbone(tmpDir,MAX_CLASSES,IMG_SIZE_Y,IMG_SIZE_X,#let the backbone always work up to MAX_CLASSES so that wrong user input can not destroy once set labels
                                    DEFAULT_BACKGROUND_ALPHA, DEFAULT_MARKER_ALPHA, DEFAULT_WATERSHED_ALPHA,
                                    MARKER_NEUTRAL_COLOR,MARKER_NEUTRAL_CLASS)
        #test assertions
        self.assertEqual(len(imgBackbone.imageSetList),3)
        imgSetList = imgBackbone.imageSetList
        self.assertIsNotNone(find_filter(imgSetList,'img001'))
        self.assertIsNotNone(find_filter(imgSetList,'img003')[0].markerFile)
        self.assertEqual(len(find_filter(imgSetList,'img004')),0)
        #clean up
        for fl in fileList:
            os.remove('%s/%s' % (tmpDir,fl))
        os.removedirs(tmpDir)

    def testQImageConversion(self):
        imgBackbone = ImageBackbone('foo',MAX_CLASSES,IMG_SIZE_Y,IMG_SIZE_X,#let the backbone always work up to MAX_CLASSES so that wrong user input can not destroy once set labels
                                    DEFAULT_BACKGROUND_ALPHA, DEFAULT_MARKER_ALPHA, DEFAULT_WATERSHED_ALPHA,
                                    MARKER_NEUTRAL_COLOR,MARKER_NEUTRAL_CLASS)
        # round-trip ARGB ndarray -> QImage -> ndarray must be lossless
        testImg = np.zeros((IMG_SIZE_X, IMG_SIZE_Y, 4), np.uint8)
        testImg[20:100,20:100,:] = 10
        testImg[20:-100,20:-100,:] = 255
        qImg = convertARGBarrayToQImage(testImg)
        testImgConverted = convertQImgToArray(qImg)
        self.assertTrue( np.all(testImg.shape==testImgConverted.shape) )
        self.assertTrue( np.all(testImg==testImgConverted) )
        # a = convertQImgToArray(self.markerImgRGB)
        # b = convertARGBArrayToIndexArray(a)
        # c = convertIndexArrayToARGBArray(b,150)
        # d = convertARGBarrayToQImage(c)
        # round-trip class-index array -> ARGB -> class-index array
        indexImg = np.zeros((IMG_SIZE_X, IMG_SIZE_Y), np.uint8)
        indexImg[20:-100,20:100] = 10
        # NOTE(review): this immediately overwrites the identical slice set
        # to 10 above -- probably one of the two slices was meant to differ.
        indexImg[20:-100, 20:100] = 1
        rgbArr = imgBackbone.convertIndexArrayToARGBArray(indexImg, DEFAULT_MARKER_ALPHA)
        indexCmp = imgBackbone.convertARGBArrayToIndexArray(rgbArr)
        self.assertTrue(np.all(indexImg.shape == indexCmp.shape))
        self.assertEqual(np.argwhere(indexImg != indexCmp).shape[0],0)
Novasoft-India/OperERP-AM-Motors | openerp/addons/portal_hr_employees/__openerp__.py | 53 | 1751 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP module manifest for portal_hr_employees.
{
    'name': 'Portal HR employees',
    'version': '0.1',
    'complexity': 'easy',
    'description': """
This module adds a list of employees to your portal's contact page if hr and portal_crm (which creates the contact page) are installed.
=======================================================================================================================================
    """,
    'author': 'OpenERP SA',
    'depends': ['hr','portal_crm'],
    'data': [
        'hr_employee_view.xml',
        'security/ir.model.access.csv',
    ],
    'installable': True,
    'auto_install': True,
    # 'category' used to appear twice ('Tools' near the top and 'Hidden'
    # here); only the last occurrence survives in a dict literal, so the
    # redundant first entry was removed and the effective value kept.
    'category': 'Hidden',
    'css': ['static/src/css/portal_hr_employees.css'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
spandey2405/phpmyadmin | doc/_ext/configext.py | 141 | 6618 | from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.domains.std import GenericObject, StandardDomain
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util import ws_re
from sphinx import addnodes
from sphinx.util.docfields import Field
from docutils import nodes
def get_id_from_cfg(text):
    """Split a ``$cfg['...']['...']`` option name into its key parts.

    Strips the ``$cfg['`` prefix, the trailing ``']`` and any ``[$i]``
    server-index placeholder, then splits on the ``']['`` separators.
    """
    if text.startswith("$cfg['"):
        text = text[6:]
    if text.endswith("']"):
        text = text[:-2]
    return text.replace('[$i]', '').split("']['")
class ConfigOption(ObjectDescription):
    # Directive for a single configuration option ($cfg[...]): renders the
    # signature, creates a ``cfg_<part>_<part>`` anchor and registers
    # several index entries plus the object for cross-referencing.
    indextemplate = 'configuration option; %s'
    parse_node = None
    has_arguments = True

    # ":default:" and ":type:" info fields rendered in the description body.
    doc_field_types = [
        Field('default', label='Default value', has_arg=False,
              names=('default', )),
        Field('type', label='Type', has_arg=False,
              names=('type',)),
    ]

    def handle_signature(self, sig, signode):
        # Show the raw option name verbatim as the signature line.
        signode.clear()
        signode += addnodes.desc_name(sig, sig)
        # normalize whitespace like XRefRole does
        name = ws_re.sub('', sig)
        return name

    def add_target_and_index(self, name, sig, signode):
        # e.g. $cfg['Servers'][$i]['host'] -> anchor "cfg_Servers_host"
        targetparts = get_id_from_cfg(name)
        targetname = 'cfg_%s' % '_'.join(targetparts)
        signode['ids'].append(targetname)
        self.state.document.note_explicit_target(signode)

        indextype = 'single'
        # Generic index entries
        indexentry = self.indextemplate % (name,)
        self.indexnode['entries'].append((indextype, indexentry,
                                          targetname, targetname))
        self.indexnode['entries'].append((indextype, name,
                                          targetname, targetname))
        # Server section
        if targetparts[0] == 'Servers' and len(targetparts) > 1:
            # per-server options get an extra "server configuration; ..." entry
            indexname = ', '.join(targetparts[1:])
            self.indexnode['entries'].append((indextype, 'server configuration; %s' % indexname,
                                              targetname, targetname))
            self.indexnode['entries'].append((indextype, indexname,
                                              targetname, targetname))
        else:
            indexname = ', '.join(targetparts)
            self.indexnode['entries'].append((indextype, indexname,
                                              targetname, targetname))

        # Record the object so the domain can resolve :config:option: xrefs.
        self.env.domaindata['config']['objects'][self.objtype, name] = \
            self.env.docname, targetname
class ConfigSectionXRefRole(XRefRole):
    """
    Cross-referencing role for configuration sections (adds an index entry).
    """

    def result_nodes(self, document, env, node, is_ref):
        # For outgoing references, plant an index target directly before
        # the xref node so the section also shows up in the general index.
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        tgtid = 'index-%s' % env.new_serialno('index')
        indexnode = addnodes.index()
        indexnode['entries'] = [
            ('single', varname, tgtid, varname),
            ('single', 'configuration section; %s' % varname, tgtid, varname)
        ]
        targetnode = nodes.target('', '', ids=[tgtid])
        document.note_explicit_target(targetnode)
        return [indexnode, targetnode, node], []
class ConfigSection(ObjectDescription):
    # Directive for a named configuration section; anchors are of the form
    # ``section-<name>`` and an index entry is created from indextemplate.
    indextemplate = 'configuration section; %s'
    parse_node = None

    def handle_signature(self, sig, signode):
        if self.parse_node:
            # custom signature parser hook (unused by default: parse_node=None)
            name = self.parse_node(self.env, sig, signode)
        else:
            signode.clear()
            signode += addnodes.desc_name(sig, sig)
            # normalize whitespace like XRefRole does
            name = ws_re.sub('', sig)
        return name

    def add_target_and_index(self, name, sig, signode):
        targetname = '%s-%s' % (self.objtype, name)
        signode['ids'].append(targetname)
        self.state.document.note_explicit_target(signode)
        if self.indextemplate:
            colon = self.indextemplate.find(':')
            if colon != -1:
                # template of the form "<indextype>: <entry>" -- split it
                indextype = self.indextemplate[:colon].strip()
                indexentry = self.indextemplate[colon+1:].strip() % (name,)
            else:
                indextype = 'single'
                indexentry = self.indextemplate % (name,)
            self.indexnode['entries'].append((indextype, indexentry,
                                              targetname, targetname))
        # register for cross-reference resolution by the domain
        self.env.domaindata['config']['objects'][self.objtype, name] = \
            self.env.docname, targetname
class ConfigOptionXRefRole(XRefRole):
    """
    Cross-referencing role for configuration options (adds an index entry).
    """

    def result_nodes(self, document, env, node, is_ref):
        # Mirrors ConfigSectionXRefRole, with "configuration option"
        # wording for the second index entry.
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        tgtid = 'index-%s' % env.new_serialno('index')
        indexnode = addnodes.index()
        indexnode['entries'] = [
            ('single', varname, tgtid, varname),
            ('single', 'configuration option; %s' % varname, tgtid, varname)
        ]
        targetnode = nodes.target('', '', ids=[tgtid])
        document.note_explicit_target(targetnode)
        return [indexnode, targetnode, node], []
class ConfigFileDomain(Domain):
    """Sphinx domain holding phpMyAdmin configuration options and sections."""
    name = 'config'
    label = 'Config'

    object_types = {
        'option': ObjType('config option', 'option'),
        'section': ObjType('config section', 'section'),
    }
    directives = {
        'option': ConfigOption,
        'section': ConfigSection,
    }
    roles = {
        'option': ConfigOptionXRefRole(),
        'section': ConfigSectionXRefRole(),
    }
    initial_data = {
        'objects': {},  # (type, name) -> docname, labelid
    }

    def clear_doc(self, docname):
        # Drop every object registered by `docname` (called before re-read).
        # Iterate over a snapshot: deleting from a dict while iterating its
        # live items() view raises RuntimeError on Python 3.
        for key, (fn, _) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][key]

    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        # Look up the (type, name) object and build a reference node to it;
        # returning None lets Sphinx report the unresolved reference.
        docname, labelid = self.data['objects'].get((typ, target), ('', ''))
        if not docname:
            return None
        else:
            return make_refnode(builder, fromdocname, docname,
                                labelid, contnode)

    def get_objects(self):
        # Yield (name, dispname, type, docname, anchor, priority) tuples
        # for the object inventory / search index.
        for (type, name), info in self.data['objects'].items():
            yield (name, name, type, info[0], info[1],
                   self.object_types[type].attrs['searchprio'])
def setup(app):
    # Sphinx extension entry point: register the custom "config" domain.
    app.add_domain(ConfigFileDomain)
| gpl-2.0 |
rynomster/django | tests/template_tests/syntax_tests/test_numpy.py | 353 | 1429 | import warnings
from unittest import skipIf
from django.test import SimpleTestCase
from ..utils import setup
try:
import numpy
except ImportError:
numpy = False
@skipIf(numpy is False, "Numpy must be installed to run these tests.")
class NumpyTests(SimpleTestCase):
    # Ignore numpy deprecation warnings (#23890).
    # Note: this call executes once, at class-creation time, and installs a
    # process-wide warning filter rather than a per-test one.
    warnings.filterwarnings(
        "ignore",
        "Using a non-integer number instead of an "
        "integer will result in an error in the future",
        DeprecationWarning
    )

    @setup({'numpy-array-index01': '{{ var.1 }}'})
    def test_numpy_array_index01(self):
        """
        Numpy's array-index syntax allows a template to access a certain
        item of a subscriptable object.
        """
        output = self.engine.render_to_string(
            'numpy-array-index01',
            {'var': numpy.array(["first item", "second item"])},
        )
        self.assertEqual(output, 'second item')

    @setup({'numpy-array-index02': '{{ var.5 }}'})
    def test_numpy_array_index02(self):
        """
        Fail silently when the array index is out of range.
        """
        output = self.engine.render_to_string(
            'numpy-array-index02',
            {'var': numpy.array(["first item", "second item"])},
        )
        # With string_if_invalid configured the engine substitutes it;
        # otherwise the invalid lookup renders as an empty string.
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
| bsd-3-clause |
savoirfairelinux/account-fiscal-rule | __unported__/account_fiscal_position_rule_purchase/purchase.py | 2 | 3496 | # -*- encoding: utf-8 -*-
###############################################################################
#
# account_fiscal_position_rule_purchase for OpenERP
# Copyright (C) 2009-TODAY Akretion <http://www.akretion.com>
# @author Renato Lima <renato.lima@akretion.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from osv import osv
class purchase_order(osv.Model):
    # Extend purchase orders so fiscal position rules (restricted to rules
    # flagged use_purchase) are re-applied whenever the partner, the
    # destination address or the company changes.
    _inherit = 'purchase.order'

    def _fiscal_position_map(self, cr, uid, result, **kwargs):
        # Inject the purchase-specific rule domain into the context and
        # delegate the actual mapping to account.fiscal.position.rule.
        if not kwargs.get('context', False):
            kwargs['context'] = {}
        kwargs['context'].update({'use_domain': ('use_purchase', '=', True)})
        fp_rule_obj = self.pool.get('account.fiscal.position.rule')
        return fp_rule_obj.apply_fiscal_mapping(cr, uid, result, **kwargs)

    def onchange_partner_id(self, cr, uid, ids, partner_id, company_id=None,
                            context=None, **kwargs):
        # Standard onchange result, then overlay the rule-derived fiscal
        # position (partner acts as invoice and shipping address here).
        if not context:
            context = {}
        result = super(purchase_order, self).onchange_partner_id(
            cr, uid, ids, partner_id)
        if not partner_id or not company_id:
            return result
        kwargs.update({
            'company_id': company_id,
            'partner_id': partner_id,
            'partner_invoice_id': partner_id,
            'partner_shipping_id': partner_id,
            'context': context
        })
        return self._fiscal_position_map(cr, uid, result, **kwargs)

    def onchange_dest_address_id(self, cr, uid, ids, partner_id,
                                 dest_address_id, company_id=None,
                                 context=None, **kwargs):
        # Recompute the fiscal position using the destination address as
        # the shipping partner.
        if not context:
            context = {}
        result = {'value': {'fiscal_position': False}}
        if not partner_id or not company_id:
            return result
        kwargs.update({
            'company_id': company_id,
            'partner_id': partner_id,
            'partner_invoice_id': partner_id,
            'partner_shipping_id': dest_address_id,
            'context': context
        })
        return self._fiscal_position_map(cr, uid, result, **kwargs)

    def onchange_company_id(self, cr, uid, ids, partner_id,
                            dest_address_id=False, company_id=False,
                            context=None, **kwargs):
        # Same mapping as above, triggered by a company change.
        if not context:
            context = {}
        result = {'value': {'fiscal_position': False}}
        if not partner_id or not company_id:
            return result
        kwargs.update({
            'company_id': company_id,
            'partner_id': partner_id,
            'partner_invoice_id': partner_id,
            'partner_shipping_id': dest_address_id,
            'context': context
        })
        return self._fiscal_position_map(cr, uid, result, **kwargs)
| agpl-3.0 |
krikru/tensorflow-opencl | tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn_test.py | 101 | 4681 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import decisions_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataThenNNTest(test_util.TensorFlowTestCase):
  # Graph-construction tests for the DecisionsToDataThenNN hybrid model:
  # no session is run, only graph building is exercised.

  def setUp(self):
    # Shared hyperparameters for a small hybrid forest model.
    self.params = tensor_forest.ForestHParams(
        num_classes=2,
        num_features=31,
        layer_size=11,
        num_layers=13,
        num_trees=17,
        connection_probability=0.1,
        hybrid_tree_depth=4,
        regularization_strength=0.01,
        learning_rate=0.01,
        regularization="",
        weight_init_mean=0.0,
        weight_init_std=0.1)
    self.params.regression = False
    # Full binary tree of depth d: 2**d - 1 nodes, 2**(d-1) leaves.
    self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
    self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)

  def testHParams(self):
    self.assertEquals(self.params.num_classes, 2)
    self.assertEquals(self.params.num_features, 31)
    self.assertEquals(self.params.layer_size, 11)
    self.assertEquals(self.params.num_layers, 13)
    self.assertEquals(self.params.num_trees, 17)
    self.assertEquals(self.params.hybrid_tree_depth, 4)
    self.assertEquals(self.params.connection_probability, 0.1)

    # Building the graphs modifies the params.
    with variable_scope.variable_scope("DecisionsToDataThenNNTest_testHParams"):
      # pylint: disable=W0612
      graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
          self.params)

      # Tree with depth 4 should have 2**0 + 2**1 + 2**2 + 2**3 = 15 nodes.
      self.assertEquals(self.params.num_nodes, 15)

  def testConstructionPollution(self):
    """Ensure that graph building doesn't modify the params in a bad way."""
    # pylint: disable=W0612
    data = [[random.uniform(-1, 1) for i in range(self.params.num_features)]
            for _ in range(100)]

    self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
    self.assertFalse(
        isinstance(self.params.num_trees, tensor_forest.ForestHParams))

    with variable_scope.variable_scope(
        "DecisionsToDataThenNNTest_testContructionPollution"):
      # pylint: disable=W0612
      graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
          self.params)

      # params object itself must survive graph construction untouched
      self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
      self.assertFalse(
          isinstance(self.params.num_trees, tensor_forest.ForestHParams))

  def testInferenceConstruction(self):
    # pylint: disable=W0612
    data = constant_op.constant(
        [[random.uniform(-1, 1) for i in range(self.params.num_features)]
         for _ in range(100)])

    with variable_scope.variable_scope(
        "DecisionsToDataThenNNTest_testInferenceContruction"):
      graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
          self.params)
      graph = graph_builder.inference_graph(data, None)

      # inference yields a Tensor of predictions
      self.assertTrue(isinstance(graph, Tensor))

  def testTrainingConstruction(self):
    # pylint: disable=W0612
    data = constant_op.constant(
        [[random.uniform(-1, 1) for i in range(self.params.num_features)]
         for _ in range(100)])

    labels = [1 for _ in range(100)]

    with variable_scope.variable_scope(
        "DecisionsToDataThenNNTest_testTrainingContruction"):
      graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
          self.params)
      graph = graph_builder.training_graph(data, labels, None)

      # training yields a train Operation, not a Tensor
      self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
  # Run the tests via TensorFlow's googletest wrapper.
  googletest.main()
| apache-2.0 |
nlgcoin/guldencoin-official | test/functional/feature_minchainwork.py | 2 | 4129 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2  # used below to size the chain past -minimumchainwork
class MinimumChainWorkTest(GuldenTestFramework):
    def set_test_params(self):
        # node0 has no minimum chain work; nodes 1 and 2 require 0x65
        # (= 101 decimal) units of work before leaving IBD.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
        self.node_min_work = [0, 101, 101]
    def setup_network(self):
        # This test relies on the chain setup being:
        # node0 <- node1 <- node2
        # Before leaving IBD, nodes prefer to download blocks from outbound
        # peers, so ensure that we're mining on an outbound peer and testing
        # block relay to inbound peers.
        self.setup_nodes()
        # chain the nodes manually instead of using the default topology
        for i in range(self.num_nodes-1):
            connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
###assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
###assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
###assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
    # Run the functional test when invoked directly.
    MinimumChainWorkTest().main()
| mit |
Venturi/cms | env/lib/python2.7/site-packages/unidecode/x093.py | 252 | 4666 | data = (
'Lun ', # 0x00
'Kua ', # 0x01
'Ling ', # 0x02
'Bei ', # 0x03
'Lu ', # 0x04
'Li ', # 0x05
'Qiang ', # 0x06
'Pou ', # 0x07
'Juan ', # 0x08
'Min ', # 0x09
'Zui ', # 0x0a
'Peng ', # 0x0b
'An ', # 0x0c
'Pi ', # 0x0d
'Xian ', # 0x0e
'Ya ', # 0x0f
'Zhui ', # 0x10
'Lei ', # 0x11
'A ', # 0x12
'Kong ', # 0x13
'Ta ', # 0x14
'Kun ', # 0x15
'Du ', # 0x16
'Wei ', # 0x17
'Chui ', # 0x18
'Zi ', # 0x19
'Zheng ', # 0x1a
'Ben ', # 0x1b
'Nie ', # 0x1c
'Cong ', # 0x1d
'Qun ', # 0x1e
'Tan ', # 0x1f
'Ding ', # 0x20
'Qi ', # 0x21
'Qian ', # 0x22
'Zhuo ', # 0x23
'Qi ', # 0x24
'Yu ', # 0x25
'Jin ', # 0x26
'Guan ', # 0x27
'Mao ', # 0x28
'Chang ', # 0x29
'Tian ', # 0x2a
'Xi ', # 0x2b
'Lian ', # 0x2c
'Tao ', # 0x2d
'Gu ', # 0x2e
'Cuo ', # 0x2f
'Shu ', # 0x30
'Zhen ', # 0x31
'Lu ', # 0x32
'Meng ', # 0x33
'Lu ', # 0x34
'Hua ', # 0x35
'Biao ', # 0x36
'Ga ', # 0x37
'Lai ', # 0x38
'Ken ', # 0x39
'Kazari ', # 0x3a
'Bu ', # 0x3b
'Nai ', # 0x3c
'Wan ', # 0x3d
'Zan ', # 0x3e
'[?] ', # 0x3f
'De ', # 0x40
'Xian ', # 0x41
'[?] ', # 0x42
'Huo ', # 0x43
'Liang ', # 0x44
'[?] ', # 0x45
'Men ', # 0x46
'Kai ', # 0x47
'Ying ', # 0x48
'Di ', # 0x49
'Lian ', # 0x4a
'Guo ', # 0x4b
'Xian ', # 0x4c
'Du ', # 0x4d
'Tu ', # 0x4e
'Wei ', # 0x4f
'Cong ', # 0x50
'Fu ', # 0x51
'Rou ', # 0x52
'Ji ', # 0x53
'E ', # 0x54
'Rou ', # 0x55
'Chen ', # 0x56
'Ti ', # 0x57
'Zha ', # 0x58
'Hong ', # 0x59
'Yang ', # 0x5a
'Duan ', # 0x5b
'Xia ', # 0x5c
'Yu ', # 0x5d
'Keng ', # 0x5e
'Xing ', # 0x5f
'Huang ', # 0x60
'Wei ', # 0x61
'Fu ', # 0x62
'Zhao ', # 0x63
'Cha ', # 0x64
'Qie ', # 0x65
'She ', # 0x66
'Hong ', # 0x67
'Kui ', # 0x68
'Tian ', # 0x69
'Mou ', # 0x6a
'Qiao ', # 0x6b
'Qiao ', # 0x6c
'Hou ', # 0x6d
'Tou ', # 0x6e
'Cong ', # 0x6f
'Huan ', # 0x70
'Ye ', # 0x71
'Min ', # 0x72
'Jian ', # 0x73
'Duan ', # 0x74
'Jian ', # 0x75
'Song ', # 0x76
'Kui ', # 0x77
'Hu ', # 0x78
'Xuan ', # 0x79
'Duo ', # 0x7a
'Jie ', # 0x7b
'Zhen ', # 0x7c
'Bian ', # 0x7d
'Zhong ', # 0x7e
'Zi ', # 0x7f
'Xiu ', # 0x80
'Ye ', # 0x81
'Mei ', # 0x82
'Pai ', # 0x83
'Ai ', # 0x84
'Jie ', # 0x85
'[?] ', # 0x86
'Mei ', # 0x87
'Chuo ', # 0x88
'Ta ', # 0x89
'Bang ', # 0x8a
'Xia ', # 0x8b
'Lian ', # 0x8c
'Suo ', # 0x8d
'Xi ', # 0x8e
'Liu ', # 0x8f
'Zu ', # 0x90
'Ye ', # 0x91
'Nou ', # 0x92
'Weng ', # 0x93
'Rong ', # 0x94
'Tang ', # 0x95
'Suo ', # 0x96
'Qiang ', # 0x97
'Ge ', # 0x98
'Shuo ', # 0x99
'Chui ', # 0x9a
'Bo ', # 0x9b
'Pan ', # 0x9c
'Sa ', # 0x9d
'Bi ', # 0x9e
'Sang ', # 0x9f
'Gang ', # 0xa0
'Zi ', # 0xa1
'Wu ', # 0xa2
'Ying ', # 0xa3
'Huang ', # 0xa4
'Tiao ', # 0xa5
'Liu ', # 0xa6
'Kai ', # 0xa7
'Sun ', # 0xa8
'Sha ', # 0xa9
'Sou ', # 0xaa
'Wan ', # 0xab
'Hao ', # 0xac
'Zhen ', # 0xad
'Zhen ', # 0xae
'Luo ', # 0xaf
'Yi ', # 0xb0
'Yuan ', # 0xb1
'Tang ', # 0xb2
'Nie ', # 0xb3
'Xi ', # 0xb4
'Jia ', # 0xb5
'Ge ', # 0xb6
'Ma ', # 0xb7
'Juan ', # 0xb8
'Kasugai ', # 0xb9
'Habaki ', # 0xba
'Suo ', # 0xbb
'[?] ', # 0xbc
'[?] ', # 0xbd
'[?] ', # 0xbe
'Na ', # 0xbf
'Lu ', # 0xc0
'Suo ', # 0xc1
'Ou ', # 0xc2
'Zu ', # 0xc3
'Tuan ', # 0xc4
'Xiu ', # 0xc5
'Guan ', # 0xc6
'Xuan ', # 0xc7
'Lian ', # 0xc8
'Shou ', # 0xc9
'Ao ', # 0xca
'Man ', # 0xcb
'Mo ', # 0xcc
'Luo ', # 0xcd
'Bi ', # 0xce
'Wei ', # 0xcf
'Liu ', # 0xd0
'Di ', # 0xd1
'Qiao ', # 0xd2
'Cong ', # 0xd3
'Yi ', # 0xd4
'Lu ', # 0xd5
'Ao ', # 0xd6
'Keng ', # 0xd7
'Qiang ', # 0xd8
'Cui ', # 0xd9
'Qi ', # 0xda
'Chang ', # 0xdb
'Tang ', # 0xdc
'Man ', # 0xdd
'Yong ', # 0xde
'Chan ', # 0xdf
'Feng ', # 0xe0
'Jing ', # 0xe1
'Biao ', # 0xe2
'Shu ', # 0xe3
'Lou ', # 0xe4
'Xiu ', # 0xe5
'Cong ', # 0xe6
'Long ', # 0xe7
'Zan ', # 0xe8
'Jian ', # 0xe9
'Cao ', # 0xea
'Li ', # 0xeb
'Xia ', # 0xec
'Xi ', # 0xed
'Kang ', # 0xee
'[?] ', # 0xef
'Beng ', # 0xf0
'[?] ', # 0xf1
'[?] ', # 0xf2
'Zheng ', # 0xf3
'Lu ', # 0xf4
'Hua ', # 0xf5
'Ji ', # 0xf6
'Pu ', # 0xf7
'Hui ', # 0xf8
'Qiang ', # 0xf9
'Po ', # 0xfa
'Lin ', # 0xfb
'Suo ', # 0xfc
'Xiu ', # 0xfd
'San ', # 0xfe
'Cheng ', # 0xff
)
| gpl-2.0 |
pablohoffman/scrapy | scrapy/xlib/pydispatch/robust.py | 25 | 1857 | """Module implementing error-catching version of send (sendRobust)"""
from scrapy.xlib.pydispatch.dispatcher import Any, Anonymous, liveReceivers, getAllReceivers
from scrapy.xlib.pydispatch.robustapply import robustApply
def sendRobust(
    signal=Any,
    sender=Anonymous,
    *arguments, **named
):
    """Send signal from sender to all connected receivers, trapping errors.

    Unlike the plain send, any Exception (or subclass) raised by a receiver
    is caught and returned as that receiver's response rather than being
    allowed to propagate.

    signal -- (hashable) signal value, see connect for details
    sender -- the sender of the signal.
        If Any, only receivers registered for Any will receive the message.
        If Anonymous, only receivers registered to receive messages from
        Anonymous or Any will receive the message.  Otherwise it can be any
        python object (normally one registered with a connect if you
        actually want something to occur).
    arguments -- positional arguments passed to *all* receivers.  Note that
        this may raise TypeErrors if the receivers do not allow the
        particular arguments; positional arguments are applied before named
        arguments, so use them with care.
    named -- named arguments, filtered by robustApply so each receiver only
        gets those it accepts.

    Returns a list of (receiver, response) pairs, where response is either
    the receiver's return value or the Exception instance it raised.
    """
    results = []
    for live_receiver in liveReceivers(getAllReceivers(sender, signal)):
        # robustApply filters the keyword arguments per receiver signature.
        try:
            outcome = robustApply(
                live_receiver,
                signal=signal,
                sender=sender,
                *arguments,
                **named
            )
        except Exception as err:
            results.append((live_receiver, err))
        else:
            results.append((live_receiver, outcome))
    return results
| bsd-3-clause |
luzpaz/QGIS | tests/src/python/test_qgsopacitywidget.py | 45 | 1458 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsOpacityWidget
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '30/05/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.gui import QgsOpacityWidget
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
start_app()
class TestQgsOpacityWidget(unittest.TestCase):
    """Unit tests for QgsOpacityWidget getters/setters and change signals."""

    def testGettersSetters(self):
        """ test widget getters/setters """
        # Use the directly-imported QgsOpacityWidget; the original used the
        # fully-qualified qgis.gui path, leaving the explicit import unused.
        w = QgsOpacityWidget()
        w.setOpacity(0.2)
        self.assertEqual(w.opacity(), 0.2)
        # Out-of-range values must be clamped to [0.0, 1.0].
        w.setOpacity(-0.2)
        self.assertEqual(w.opacity(), 0.0)
        w.setOpacity(100)
        self.assertEqual(w.opacity(), 1.0)

    def test_ChangedSignals(self):
        """ test that signals are correctly emitted when setting opacity"""
        w = QgsOpacityWidget()
        spy = QSignalSpy(w.opacityChanged)
        w.setOpacity(0.2)
        self.assertEqual(len(spy), 1)
        self.assertEqual(spy[0][0], 0.2)
        # A clamped value (100 -> 1.0) must still emit with the clamped value.
        w.setOpacity(100)
        self.assertEqual(len(spy), 2)
        self.assertEqual(spy[1][0], 1.0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| gpl-2.0 |
dkandalov/katas | python/ml/scikit/plot_iris.py | 1 | 1811 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# Load the iris dataset; only the first two features are kept so the
# decision surfaces can be drawn in 2-D.
iris = datasets.load_iris()
# we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# We create an instance of SVM and fit our data.
# We do not scale our data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
linear_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# title for the plots; order must match the classifier tuple enumerated below
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, linear_svc, rbf_svc, poly_svc)):
    # Predict the class of every mesh point to paint the decision regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
plt.show()
powerjg/gem5-ci-test | src/mem/slicc/ast/NewExprAST.py | 91 | 2107 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
class NewExprAST(ExprAST):
    """AST node representing a 'new <Type>' allocation expression in SLICC."""

    def __init__(self, slicc, type_ast):
        """Store the AST node naming the type to allocate."""
        super(NewExprAST, self).__init__(slicc)
        self.type_ast = type_ast

    @property
    def name(self):
        """The textual name of the allocated type."""
        return str(self.type_ast)

    def __repr__(self):
        return "[NewExprAST: %r]" % self.type_ast

    def generate(self, code):
        """Emit a C++ 'new <c_ident>' expression and return the Type."""
        # The template below interpolates the local named 'type', so that
        # binding name must be kept as-is.
        type = self.type_ast.type
        saved_fix = code.nofix()
        code("new ${{type.c_ident}}")
        code.fix(saved_fix)
        return type
| bsd-3-clause |
jonathonwalz/ansible | lib/ansible/modules/windows/win_chocolatey.py | 60 | 3973 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <trond@hindenes.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = r'''
---
module: win_chocolatey
version_added: "1.9"
short_description: Installs packages using chocolatey
description:
- Installs packages using Chocolatey (U(http://chocolatey.org/)).
- If Chocolatey is missing from the system, the module will install it.
- List of packages can be found at U(http://chocolatey.org/packages)
options:
name:
description:
- Name of the package to be installed.
required: true
state:
description:
- State of the package on the system.
choices:
- present
- absent
- latest
- reinstalled
default: present
force:
description:
- Forces install of the package (even if it already exists).
- Using C(force) will cause ansible to always report that a change was made.
choices:
- yes
- no
default: no
upgrade:
description:
      - If package is already installed, try to upgrade it to the latest version or to the specified version.
- As of Ansible v2.3 this is deprecated, set parameter C(state) to "latest" for the same result.
choices:
- yes
- no
default: no
version:
description:
- Specific version of the package to be installed.
- Ignored when C(state) is set to "absent".
source:
description:
- Specify source rather than using default chocolatey repository.
install_args:
description:
- Arguments to pass to the native installer.
version_added: '2.1'
params:
description:
- Parameters to pass to the package
version_added: '2.1'
allow_empty_checksums:
description:
- Allow empty checksums to be used.
default: false
version_added: '2.2'
ignore_checksums:
description:
- Ignore checksums altogether.
default: false
version_added: '2.2'
ignore_dependencies:
description:
- Ignore dependencies, only install/upgrade the package itself.
default: false
version_added: '2.1'
timeout:
description:
- The time to allow chocolatey to finish before timing out.
default: 2700
version_added: '2.3'
aliases: [ execution_timeout ]
author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
'''
# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
EXAMPLES = r'''
# Install git
win_chocolatey:
name: git
state: present
# Upgrade installed packages
win_chocolatey:
name: all
state: latest
# Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus.install
version: '6.6'
# Install git from specified repository
win_chocolatey:
name: git
source: https://someserver/api/v2/
# Uninstall git
win_chocolatey:
name: git
state: absent
'''
| gpl-3.0 |
zadgroup/edx-platform | cms/envs/dev_with_worker.py | 127 | 1180 | """
This config file follows the dev enviroment, but adds the
requirement of a celery worker running in the background to process
celery tasks.
The worker can be executed using:
django_admin.py celery worker
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from dev import *
################################# CELERY ######################################
# Requires a separate celery worker
CELERY_ALWAYS_EAGER = False
# Use django db as the broker and result store
BROKER_URL = 'django://'
INSTALLED_APPS += ('djcelery.transport', )
CELERY_RESULT_BACKEND = 'database'
# Polling interval for the django-kombu transport (presumably seconds —
# confirm against djkombu documentation).
DJKOMBU_POLLING_INTERVAL = 1.0
# Disable transaction management because we are using a worker. Views
# that request a task and wait for the result will deadlock otherwise.
MIDDLEWARE_CLASSES = tuple(
    c for c in MIDDLEWARE_CLASSES
    if c != 'django.middleware.transaction.TransactionMiddleware')
| agpl-3.0 |
raycarnes/account-invoicing | __unported__/stock_invoice_picking_incoterm/account_invoice.py | 20 | 1333 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class account_invoice(orm.Model):
    # Extend account.invoice with an 'incoterm' many2one field
    # (module: stock_invoice_picking_incoterm).
    _inherit = "account.invoice"
    _columns = {
        'incoterm': fields.many2one(
            'stock.incoterms',
            'Incoterm',
            help="International Commercial Terms are a series of predefined "
                 "commercial terms used in international transactions."
        ),
    }
| agpl-3.0 |
choderalab/bayesian-itc | examples/sampl4_notebook/models.py | 3 | 42066 | #!/usr/bin/env python
"""
A test of pymc for ITC.
"""
#=============================================================================================
# IMPORTS
#=============================================================================================
import numpy
import pymc
import copy
import scipy.optimize
import scipy.integrate
from math import sqrt, exp, log
#=============================================================================================
# Physical constants
#=============================================================================================
Na = 6.02214179e23 # Avogadro's number (number/mol)
kB = Na * 1.3806504e-23 / 4184.0 # Boltzmann constant (kcal/mol/K)
C0 = 1.0 # standard concentration (M)
#=============================================================================================
# Rescaling StepMethod for sampling correlated changes in ligand and receptor concentration
#=============================================================================================
class RescalingStep(pymc.StepMethod):
def __init__(self, dictionary, beta, max_scale=1.03, verbose=0, interval=100):
"""
dictionary (dict) - must contain dictionary of objects for Ls, P0, DeltaH, DeltaG
"""
# Verbosity flag
self.verbose = verbose
# Store stochastics.
self.dictionary = dictionary
# Initialize superclass.
pymc.StepMethod.__init__(self, dictionary.values(), verbose)
self._id = 'RescalingMetropolis_'+'_'.join([p.__name__ for p in self.stochastics])
# State variables used to restore the state in a later session.
self._state += ['max_scale', '_current_iter', 'interval']
self.max_scale = max_scale
self.beta = beta
self._current_iter = 0
self.interval = interval
self.accepted = 0
self.rejected = 0
# Report
if self.verbose:
print "Initialization..."
print "max_scale: ", self.max_scale
def propose(self):
# Choose trial scaling factor or its inverse with equal probability, so that proposal move is symmetric.
factor = (self.max_scale - 1) * numpy.random.rand() + 1;
if (numpy.random.rand() < 0.5):
factor = 1./factor;
# Scale thermodynamic parameters and variables with this factor.
self.dictionary['Ls'].value = self.dictionary['Ls'].value * factor
self.dictionary['P0'].value = self.dictionary['P0'].value * factor
self.dictionary['DeltaH'].value = self.dictionary['DeltaH'].value / factor
self.dictionary['DeltaG'].value = self.dictionary['DeltaG'].value + (1./self.beta) * numpy.log(factor);
return
def step(self):
# Probability and likelihood for stochastic's current value:
logp = sum([stochastic.logp for stochastic in self.stochastics])
loglike = self.loglike
if self.verbose > 1:
print 'Current likelihood: ', logp+loglike
# Sample a candidate value
self.propose()
# Metropolis acception/rejection test
accept = False
try:
# Probability and likelihood for stochastic's proposed value:
logp_p = sum([stochastic.logp for stochastic in self.stochastics])
loglike_p = self.loglike
if self.verbose > 2:
print 'Current likelihood: ', logp+loglike
if numpy.log(numpy.random.rand()) < logp_p + loglike_p - logp - loglike:
accept = True
self.accepted += 1
if self.verbose > 2:
print 'Accepted'
else:
self.rejected += 1
if self.verbose > 2:
print 'Rejected'
except pymc.ZeroProbability:
self.rejected += 1
logp_p = None
loglike_p = None
if self.verbose > 2:
print 'Rejected with ZeroProbability error.'
if (not self._current_iter % self.interval) and self.verbose > 1:
print "Step ", self._current_iter
print "\tLogprobability (current, proposed): ", logp, logp_p
print "\tloglike (current, proposed): : ", loglike, loglike_p
for stochastic in self.stochastics:
print "\t", stochastic.__name__, stochastic.last_value, stochastic.value
if accept:
print "\tAccepted\t*******\n"
else:
print "\tRejected\n"
print "\tAcceptance ratio: ", self.accepted/(self.accepted+self.rejected)
if not accept:
self.reject()
self._current_iter += 1
return
@classmethod
def competence(self, stochastic):
if str(stochastic) in ['DeltaG', 'DeltaH', 'DeltaH_0', 'Ls', 'P0']:
return 1
return 0
def reject(self):
for stochastic in self.stochastics:
# stochastic.value = stochastic.last_value
stochastic.revert()
def tune(self, verbose):
return False
#=============================================================================================
# Binding models
#=============================================================================================
class BindingModel(object):
    """
    Abstract base class for reaction models.

    Subclasses define priors, a deterministic heat model, and an MCMC sampler.
    """
    def __init__(self):
        pass
#=============================================================================================
# Two-component binding model
#=============================================================================================
class TwoComponentBindingModel(BindingModel):
    """
    Bayesian model for a 1:1 receptor-ligand ITC titration.

    Builds lognormal priors on the true cell/syringe concentrations (centered
    on the stated values with 10% relative error), uniform priors on the
    thermodynamic parameters and noise, and an MCMC sampler over the observed
    injection heats.
    """
    def __init__(self, Ls_stated, P0_stated, q_n_observed, DeltaVn, temperature, V0):
        """
        ARGUMENTS
        Ls_stated (float) - stated syringe ligand concentration (M)
        P0_stated (float) - stated sample-cell protein concentration (M)
        q_n_observed (numpy array) - observed integrated injection heats (cal/injection)
        DeltaVn (float or iterable) - injection volume(s) (L); a scalar is broadcast to all injections
        temperature (float) - temperature (K)
        V0 (float) - calorimeter sample cell volume (L)
        """
        # Determine number of observations.
        self.N = q_n_observed.size
        # Store injection volumes
        if not numpy.iterable(DeltaVn):
            self.DeltaVn = numpy.ones([self.N], numpy.float64) * DeltaVn
        else:
            self.DeltaVn = numpy.array(DeltaVn)
        # Store calorimeter properties.
        self.V0 = V0
        # Store temperature.
        self.temperature = temperature # temperature (kelvin)
        self.beta = 1.0 / (kB * temperature) # inverse temperature 1/(kcal/mol)
        # Compute uncertainties in stated concentrations.
        dP0 = 0.10 * P0_stated # uncertainty in protein stated concentration (M) - 10% error
        dLs = 0.10 * Ls_stated # uncertainty in ligand stated concentration (M) - 10% error
        # Determine guesses for initial values
        log_sigma_guess = log(q_n_observed[-4:].std()) # cal/injection
        DeltaG_guess = -8.3 # kcal/mol
        DeltaH_guess = -12.0 # kcal/mol
        DeltaH_0_guess = q_n_observed[-1] # cal/injection
        # Determine min and max range for log_sigma
        log_sigma_min = log_sigma_guess - 10
        log_sigma_max = log_sigma_guess + 5
        # Determine range for priors for thermodynamic parameters.
        DeltaG_min = -40. # (kcal/mol)
        DeltaG_max = +40. # (kcal/mol)
        DeltaH_min = -100. # (kcal/mol)
        DeltaH_max = +100. # (kcal/mol)
        heat_interval = q_n_observed.max() - q_n_observed.min()
        DeltaH_0_min = q_n_observed.min() - heat_interval # (cal/mol)
        DeltaH_0_max = q_n_observed.max() + heat_interval # (cal/mol)
        # Define priors for concentrations.
        #self.P0 = pymc.Normal('P0', mu=P0_stated, tau=1.0/dP0**2, value=P0_stated)
        #self.Ls = pymc.Normal('Ls', mu=Ls_stated, tau=1.0/dLs**2, value=Ls_stated)
        self.P0 = pymc.Lognormal('P0', mu=log(P0_stated), tau=1.0/log(1.0+(dP0/P0_stated)**2), value=P0_stated)
        self.Ls = pymc.Lognormal('Ls', mu=log(Ls_stated), tau=1.0/log(1.0+(dLs/Ls_stated)**2), value=Ls_stated)
        # Define priors for thermodynamic quantities.
        self.log_sigma = pymc.Uniform('log_sigma', lower=log_sigma_min, upper=log_sigma_max, value=log_sigma_guess)
        self.DeltaG = pymc.Uniform('DeltaG', lower=DeltaG_min, upper=DeltaG_max, value=DeltaG_guess)
        self.DeltaH = pymc.Uniform('DeltaH', lower=DeltaH_min, upper=DeltaH_max, value=DeltaH_guess)
        self.DeltaH_0 = pymc.Uniform('DeltaH_0', lower=DeltaH_0_min, upper=DeltaH_0_max, value=DeltaH_0_guess)
        # Deterministic functions.
        # NOTE(review): q_n_obs is bound to self.DeltaH_0 rather than to the
        # observed heats; it is only used for debug printing inside
        # expected_injection_heats, but confirm this binding is intentional.
        q_n_model = pymc.Lambda('q_n_model', lambda P0=self.P0, Ls=self.Ls, DeltaG=self.DeltaG, DeltaH=self.DeltaH, DeltaH_0=self.DeltaH_0, q_n_obs=self.DeltaH_0 :
                                self.expected_injection_heats(P0, Ls, DeltaG, DeltaH, DeltaH_0, q_n_obs))
        tau = pymc.Lambda('tau', lambda log_sigma=self.log_sigma : self.tau(log_sigma))
        # Define observed data.
        self.q_n_obs = pymc.Normal('q_n', mu=q_n_model, tau=tau, observed=True, value=q_n_observed)
        # Create sampler.
        mcmc = pymc.MCMC(self, db='ram')
        mcmc.use_step_method(pymc.Metropolis, self.DeltaG)
        mcmc.use_step_method(pymc.Metropolis, self.DeltaH)
        mcmc.use_step_method(pymc.Metropolis, self.DeltaH_0)
        mcmc.use_step_method(pymc.Metropolis, self.P0)
        mcmc.use_step_method(pymc.Metropolis, self.Ls)
        # Joint rescaling move to decorrelate concentrations and energies.
        mcmc.use_step_method(RescalingStep, { 'Ls' : self.Ls, 'P0' : self.P0, 'DeltaH' : self.DeltaH, 'DeltaG' : self.DeltaG }, self.beta)
        self.mcmc = mcmc
        return
    def expected_injection_heats(self, P0, Ls, DeltaG, DeltaH, DeltaH_0, q_n_obs):
        """
        Expected heats of injection for two-component binding model.
        ARGUMENTS
        P0 - true initial protein concentration in the sample cell (M)
        Ls - true ligand concentration in the syringe (M)
        DeltaG - free energy of binding (kcal/mol)
        DeltaH - enthalpy of binding (kcal/mol)
        DeltaH_0 - heat of injection (cal/injection)
        q_n_obs - observed heats; used only for debug printing
        RETURNS
        q_n (numpy array) - expected heat for each injection (cal/injection)
        """
        debug = False
        Kd = exp(self.beta * DeltaG) * C0 # dissociation constant (M)
        N = self.N
        # Compute complex concentrations.
        Pn = numpy.zeros([N], numpy.float64) # Pn[n] is the protein concentration in sample cell after n injections (M)
        Ln = numpy.zeros([N], numpy.float64) # Ln[n] is the ligand concentration in sample cell after n injections (M)
        PLn = numpy.zeros([N], numpy.float64) # PLn[n] is the complex concentration in sample cell after n injections (M)
        dcum = 1.0 # cumulative dilution factor (dimensionless)
        for n in range(N):
            # Instantaneous injection model (perfusion)
            # TODO: Allow injection volume to vary for each injection.
            d = 1.0 - (self.DeltaVn[n] / self.V0) # dilution factor for this injection (dimensionless)
            dcum *= d # cumulative dilution factor
            P = self.V0 * P0 * dcum # total quantity of protein in sample cell after n injections (mol)
            L = self.V0 * Ls * (1. - dcum) # total quantity of ligand in sample cell after n injections (mol)
            # Exact root of the 1:1 binding quadratic for the complex concentration.
            PLn[n] = 0.5/self.V0 * ((P + L + Kd*self.V0) - sqrt((P + L + Kd*self.V0)**2 - 4*P*L)); # complex concentration (M)
            Pn[n] = P/self.V0 - PLn[n]; # free protein concentration in sample cell after n injections (M)
            Ln[n] = L/self.V0 - PLn[n]; # free ligand concentration in sample cell after n injections (M)
        # Compute expected injection heats.
        q_n = numpy.zeros([N], numpy.float64) # q_n_model[n] is the expected heat from injection n
        # Instantaneous injection model (perfusion)
        q_n[0] = -(1000.0*DeltaH) * self.V0 * PLn[0] - DeltaH_0 # first injection
        for n in range(1,N):
            d = 1.0 - (self.DeltaVn[n] / self.V0) # dilution factor (dimensionless)
            q_n[n] = -(1000.0*DeltaH) * self.V0 * (PLn[n] - d*PLn[n-1]) - DeltaH_0 # subsequent injections
        # Debug output
        if debug:
            print "DeltaG = %6.1f kcal/mol ; DeltaH = %6.1f kcal/mol ; DeltaH_0 = %6.1f ucal/injection" % (DeltaG, DeltaH, DeltaH_0*1e6)
            for n in range(N):
                print "%6.1f" % (PLn[n]*1e6),
            print ""
            for n in range(N):
                print "%6.1f" % (q_n[n]*1e6),
            print ""
            for n in range(N):
                print "%6.1f" % (q_n_obs[n]*1e6),
            print ""
            print ""
        return q_n
    def tau(self, log_sigma):
        """
        Injection heat measurement precision, tau = 1/sigma**2 = exp(-2*log_sigma).
        """
        return exp(-2.0*log_sigma)
#=============================================================================================
# Titration experiment
#=============================================================================================
class Experiment(object):
    """
    A calorimetry experiment.

    Stores initial species concentrations, per-injection volumes, and the
    observed integrated injection heats for a single titration.
    """
    def __init__(self, sample_cell_concentrations, syringe_concentrations, injection_volumes, injection_heats, temperature):
        """
        Initialize a calorimetry experiment.
        ARGUMENTS
        sample_cell_concentrations (dict) - a dictionary of initial concentrations of each species in sample cell (M)
        syringe_concentrations (dict) - a dictionary of initial concentrations of each species in the syringe
        injection_volumes (list or numpy array of N floats) - injection volumes in L
        injection_heats (list or numpy array of N floats) - injection heats in cal/injection
        temperature (float) - temperature (K)
        EXAMPLES
        ABRF-MIRG'02 group 10
        >>> V0 = 1.4301e-3 # volume of calorimeter sample cell listed in manual (L)
        >>> V0 = V0 - 0.044e-3 # sample cell volume after Tellinghuisen volume correction for VP-ITC (L)
        >>> DeltaV = 8.e-6 # injection volume (L)
        >>> injection_volumes = [DeltaV] * 30 # one volume per injection
        >>> P0_stated = 32.e-6 # protein stated concentration (M)
        >>> Ls_stated = 384.e-6 # ligand syringe stated concentration (M)
        >>> injection_heats = numpy.array([-13.343, -13.417, -13.279, -13.199, -13.118, -12.781, -12.600, -12.124, -11.633, -10.921, -10.009, -8.810, -7.661, -6.272, -5.163, -4.228, -3.519, -3.055, -2.599, -2.512, -2.197, -2.096, -2.087, -1.959, -1.776, -1.879, -1.894, -1.813, -1.740, -1.810]) * DeltaV * Ls_stated * 1000.0 # integrated heats of injection (cal/injection)
        >>> temperature = 298.15
        >>> experiment = Experiment({'CA II' : P0_stated}, {'CBS' : Ls_stated}, injection_volumes, injection_heats, temperature)
        """
        # TODO: Do sanity checking to make sure number of injections matches up, etc.
        self.sample_cell_concentrations = sample_cell_concentrations
        self.syringe_concentrations = syringe_concentrations
        self.injection_volumes = numpy.array(injection_volumes)
        self.observed_injection_heats = numpy.array(injection_heats)
        self.temperature = temperature
        return
#=============================================================================================
# Competitive binding model
#=============================================================================================
class CompetitiveBindingModel(BindingModel):
"""
Competitive binding model.
"""
def __init__(self, experiments, receptor, V0, concentration_uncertainty=0.10, verbose=False):
"""
ARGUMENTS
experiments (list of Experiment) -
receptor (string) - name of receptor species
V0 (float) - calorimeter sample cell volume
OPTIONAL ARGUMENTS
concentration_uncertainty (float) - relative uncertainty in concentrations
"""
self.verbose = verbose
# Store temperature.
# NOTE: Right now, there can only be one.
self.temperature = experiments[0].temperature # temperature (kelvin)
self.beta = 1.0 / (kB * self.temperature) # inverse temperature 1/(kcal/mol)
# Store copy of experiments.
self.experiments = copy.deepcopy(experiments)
if verbose: print "%d experiments" % len(self.experiments)
# Store sample cell volume.
self.V0 = V0
# Store the name of the receptor.
self.receptor = receptor
if verbose: print "species '%s' will be treated as receptor" % self.receptor
# Make a list of names of all molecular species.
self.species = set() # all molecular species
for experiment in experiments:
self.species.update( experiment.sample_cell_concentrations.keys() )
self.species.update( experiment.syringe_concentrations.keys() )
if verbose: print "species: ", self.species
# Make a list of all ligands.
self.ligands = copy.deepcopy(self.species)
self.ligands.remove(receptor)
if verbose: print "ligands: ", self.ligands
# Create a list of all stochastics.
self.stochastics = list()
# Create a prior for thermodynamic parameters of binding for each ligand-receptor interaction.
DeltaG_min = -40. # (kcal/mol)
DeltaG_max = +40. # (kcal/mol)
DeltaH_min = -100. # (kcal/mol)
DeltaH_max = +100. # (kcal/mol)
self.thermodynamic_parameters = dict()
for ligand in self.ligands:
name = "DeltaG of %s * %s" % (self.receptor, ligand)
x = pymc.Uniform(name, lower=DeltaG_min, upper=DeltaG_max, value=0.0)
self.thermodynamic_parameters[name] = x
self.stochastics.append(x)
name = "DeltaH of %s * %s" % (self.receptor, ligand)
x = pymc.Uniform(name, lower=DeltaH_min, upper=DeltaH_max, value=0.0)
self.thermodynamic_parameters[name] = x
self.stochastics.append(x)
if verbose:
print "thermodynamic parameters:"
print self.thermodynamic_parameters
# DEBUG: Set initial thermodynamic parameters to literature values.
self.thermodynamic_parameters["DeltaG of HIV protease * acetyl pepstatin"].value = -9.0
self.thermodynamic_parameters["DeltaH of HIV protease * acetyl pepstatin"].value = +6.8
self.thermodynamic_parameters["DeltaG of HIV protease * KNI-10033"].value = -14.870
self.thermodynamic_parameters["DeltaH of HIV protease * KNI-10033"].value = -8.200
self.thermodynamic_parameters["DeltaG of HIV protease * KNI-10075"].value = -14.620
self.thermodynamic_parameters["DeltaH of HIV protease * KNI-10075"].value = -12.120
# Determine min and max range for log_sigma (log of instrument heat measurement error)
# TODO: This should depend on a number of factors, like integration time, heat signal, etc.?
sigma_guess = 0.0
for experiment in self.experiments:
sigma_guess += experiment.observed_injection_heats[-4:].std()
sigma_guess /= float(len(self.experiments))
log_sigma_guess = log(sigma_guess)
log_sigma_min = log_sigma_guess - 10
log_sigma_max = log_sigma_guess + 5
self.log_sigma = pymc.Uniform('log_sigma', lower=log_sigma_min, upper=log_sigma_max, value=log_sigma_guess)
self.stochastics.append(self.log_sigma)
tau = pymc.Lambda('tau', lambda log_sigma=self.log_sigma : exp(-2.0 * log_sigma))
self.stochastics.append(tau)
# Define priors for unknowns for each experiment.
for (index, experiment) in enumerate(self.experiments):
# Number of observations
experiment.ninjections = experiment.observed_injection_heats.size
if verbose: print "Experiment %d has %d injections" % (index, experiment.ninjections)
# Heat of dilution / mixing
# We allow the heat of dilution/mixing to range in observed range of heats, plus a larger margin of the range of oberved heats.
max_heat = experiment.observed_injection_heats.max()
min_heat = experiment.observed_injection_heats.min()
heat_interval = max_heat - min_heat
last_heat = experiment.observed_injection_heats[-1] # last injection heat provides a good initial guess for heat of dilution/mixing
experiment.DeltaH_0 = pymc.Uniform("DeltaH_0 for experiment %d" % index, lower=min_heat-heat_interval, upper=max_heat+heat_interval, value=last_heat)
self.stochastics.append(experiment.DeltaH_0)
# True concentrations
experiment.true_sample_cell_concentrations = dict()
for species, concentration in experiment.sample_cell_concentrations.iteritems():
x = pymc.Lognormal("initial sample cell concentration of %s in experiment %d" % (species, index),
mu=log(concentration), tau=1.0/log(1.0+concentration_uncertainty**2),
value=concentration)
experiment.true_sample_cell_concentrations[species] = x
self.stochastics.append(x)
experiment.true_syringe_concentrations = dict()
for species, concentration in experiment.syringe_concentrations.iteritems():
x = pymc.Lognormal("initial syringe concentration of %s in experiment %d" % (species, index),
mu=log(concentration), tau=1.0/log(1.0+concentration_uncertainty**2),
value=concentration)
experiment.true_syringe_concentrations[species] = x
self.stochastics.append(x)
# Add species not explicitly listed with zero concentration.
for species in self.species:
if species not in experiment.true_sample_cell_concentrations:
experiment.true_sample_cell_concentrations[species] = 0.0
if species not in experiment.true_syringe_concentrations:
experiment.true_syringe_concentrations[species] = 0.0
# True injection heats
experiment.true_injection_heats = pymc.Lambda("true injection heats for experiment %d" % index,
lambda experiment=experiment,
sample_cell_concentrations=experiment.true_sample_cell_concentrations,
syringe_concentrations=experiment.true_syringe_concentrations,
DeltaH_0=experiment.DeltaH_0,
thermodynamic_parameters=self.thermodynamic_parameters :
self.expected_injection_heats(experiment, sample_cell_concentrations, syringe_concentrations, DeltaH_0, thermodynamic_parameters))
self.stochastics.append(experiment.true_injection_heats)
# Observed injection heats
experiment.observation = pymc.Normal("observed injection heats for experiment %d" % index,
mu=experiment.true_injection_heats, tau=tau,
observed=True, value=experiment.observed_injection_heats)
self.stochastics.append(experiment.observation)
# Create sampler.
print "Creating sampler..."
mcmc = pymc.MCMC(self.stochastics, db='ram')
#db = pymc.database.pickle.load('MCMC.pickle') # DEBUG
#mcmc = pymc.MCMC(self.stochastics, db=db)
for stochastic in self.stochastics:
print stochastic
try:
mcmc.use_step_method(pymc.Metropolis, stochastic)
except:
pass
mcmc.use_step_method(RescalingStep, { 'Ls' : self.experiments[0].true_syringe_concentrations['acetyl pepstatin'],
'P0' : self.experiments[0].true_sample_cell_concentrations['HIV protease'],
'DeltaH' : self.thermodynamic_parameters['DeltaH of HIV protease * acetyl pepstatin'],
'DeltaG' : self.thermodynamic_parameters['DeltaG of HIV protease * acetyl pepstatin'] }, self.beta)
mcmc.use_step_method(RescalingStep, { 'Ls' : self.experiments[1].true_syringe_concentrations['KNI-10033'],
'P0' : self.experiments[1].true_sample_cell_concentrations['HIV protease'],
'DeltaH' : self.thermodynamic_parameters['DeltaH of HIV protease * KNI-10033'],
'DeltaG' : self.thermodynamic_parameters['DeltaG of HIV protease * KNI-10033'] }, self.beta)
mcmc.use_step_method(RescalingStep, { 'Ls' : self.experiments[2].true_syringe_concentrations['KNI-10075'],
'P0' : self.experiments[2].true_sample_cell_concentrations['HIV protease'],
'DeltaH' : self.thermodynamic_parameters['DeltaH of HIV protease * KNI-10075'],
'DeltaG' : self.thermodynamic_parameters['DeltaG of HIV protease * KNI-10075'] }, self.beta)
self.mcmc = mcmc
def equilibrium_concentrations(self, Ka_n, C0_R, C0_Ln, V, c0=None):
"""
Compute the equilibrium concentrations of each complex species for N ligands competitively binding to a receptor.
ARGUMENTS
Ka_n (numpy N-array of float) - Ka_n[n] is the association constant for receptor and ligand species n (1/M)
x_R (float) - the total number of moles of receptor in the sample volume
x_n (numpy N-array of float) - x_n[n] is the total number of moles of ligand species n in the sample volume
V (float) - the total sample volume (L)
RETURNS
C_n (numpy N-array of float) - C_n[n] is the concentration of complex of receptor with ligand species n
EXAMPLES
>>> V = 1.4303e-3 # volume (L)
>>> x_R = V * 510.e-3 # receptor
>>> x_Ln = numpy.array([V * 8.6e-6, 200.e-6 * 55.e-6]) # ligands
>>> Ka_n = numpy.array([1./(400.e-9), 1./(2.e-11)]) # association constants
>>> C_PLn = equilibrium_concentrations(Ka_n, x_R, x_Ln, V)
NOTES
Each complex concentration C_n must obey the relation
Ka_n[n] = C_RLn[n] / (C_R * C_Ln[n]) for n = 1..N
with conservation of mass constraints
V * (C_Ln[n] + C_RLn[n]) = x_Ln[n] for n = 1..N
and
V * (C_R + C_RLn[:].sum()) = x_R
along with the constraints
0 <= V * C_RLn[n] <= min(x_Ln[n], x_R) for n = 1..N
V * C_RLn[:].sum() <= x_R
We can rearrange these expressions to give
V * C_R * C_Ln[n] * Ka_n[n] - V * C_RLn[n] = 0
and eliminate C_Ln[n] and C_R to give
V * (x_R/V - C_RLn[:].sum()) * (x_Ln[n]/V - C_RLn[n]) * Ka_n[n] - V * C_RLn[n] = 0 for n = 1..N
"""
x_R = C0_R * V
x_Ln = C0_Ln * V
nspecies = Ka_n.size
#print "x_R = ", x_R
#print "x_Ln = ", x_Ln
#print "x_Ln / V = ", x_Ln / V
#print "Ka_n = ", Ka_n
# Define optimization functions
def func(C_RLn):
f_n = V * (x_R/V - C_RLn[:].sum()) * (x_Ln[:]/V - C_RLn[:]) * Ka_n[:] - V * C_RLn[:]
#print "f_n = ", f_n
return f_n
def fprime(C_RLn):
nspecies = C_RLn.size
G_nm = numpy.zeros([nspecies,nspecies], numpy.float64) # G_nm[n,m] is the derivative of func[n] with respect to C_RLn[m]
for n in range(nspecies):
G_nm[n,:] = - V * (x_Ln[:]/V - C_RLn[:]) * Ka_n[:]
G_nm[n,n] -= V * (Ka_n[n] * (x_R/V - C_RLn[:].sum()) + 1.0)
return G_nm
def sfunc(s):
#print "s = ", s
f_n = V * (x_R/V - (s[:]**2).sum()) * (x_Ln[:]/V - s[:]**2) * Ka_n[:] - V * s[:]**2
#print "f_n = ", f_n
return f_n
def sfprime(s):
nspecies = s.size
G_nm = numpy.zeros([nspecies,nspecies], numpy.float64) # G_nm[n,m] is the derivative of func[n] with respect to C_RLn[m]
for n in range(nspecies):
G_nm[n,:] = - V * (x_Ln[:]/V - s[:]**2) * Ka_n[:]
G_nm[n,n] -= V * (Ka_n[n] * (x_R/V - (s[:]**2).sum()) + 1.0)
G_nm[n,:] *= 2. * s[n]
return G_nm
# Allocate storage for complexes
# Compute equilibrium concentrations.
#x0 = numpy.zeros([nspecies], numpy.float64)
#x0 = (x_Ln / V).copy()
#x = scipy.optimize.fsolve(func, x0, fprime=fprime)
#C_RLn = x
#x0 = numpy.sqrt(x_Ln / V).copy()
#x = scipy.optimize.fsolve(sfunc, x0, fprime=sfprime)
#C_RLn = x**2
def objective(x):
f_n = func(x)
G_nm = fprime(x)
obj = (f_n**2).sum()
grad = 0.0 * f_n
for n in range(f_n.size):
grad += 2 * f_n[n] * G_nm[n,:]
return (obj, grad)
#x0 = numpy.zeros([nspecies], numpy.float64)
#bounds = list()
#for n in range(nspecies):
# m = min(C0_R, C0_Ln[n])
# bounds.append( (0., m) )
#[x, a, b] = scipy.optimize.fmin_l_bfgs_b(objective, x0, bounds=bounds)
#C_RLn = x
def ode(c_n, t, Ka_n, x_Ln, x_R):
dc_n = - c_n[:] + Ka_n[:] * (x_Ln[:]/V - c_n[:]) * (x_R/V - c_n[:].sum())
return dc_n
def odegrad(c_n, t, Ka_n, x_Ln, x_R):
N = c_n.size
d2c = numpy.zeros([N,N], numpy.float64)
for n in range(N):
d2c[n,:] = -Ka_n[n] * (x_Ln[n]/V - c_n[n])
d2c[n,n] += -(Ka_n[n] * (x_R/V - c_n[:].sum()) + 1.0)
return d2c
#if c0 is None: c0 = numpy.zeros([nspecies], numpy.float64)
#maxtime = 100.0 * (x_R/V) / Ka_n.max()
#time = [0, maxtime / 2.0, maxtime]
#c = scipy.integrate.odeint(ode, c0, time, Dfun=odegrad, args=(Ka_n, x_Ln, x_R))
#C_RLn = c[-1,:]
#c = numpy.zeros([nspecies], numpy.float64)
#maxtime = 1.0 / Ka_n.min()
#maxtime = 1.0 / ((x_R/V) * Ka_n.min())
#maxtime = 1.0
#time = [0, maxtime]
#c = scipy.optimize.fsolve(ode, c, fprime=odegrad, args=(0.0, Ka_n, x_Ln, x_R), xtol=1.0e-6)
#c = scipy.integrate.odeint(ode, c, time, Dfun=odegrad, args=(Ka_n, x_Ln, x_R), mxstep=50000)
#c = c[-1,:]
#C_RLn = c
#print "C_RLn = ", C_RLn
#print ""
c = numpy.zeros([nspecies], numpy.float64)
sorted_indices = numpy.argsort(-x_Ln)
for n in range(nspecies):
indices = sorted_indices[0:n+1]
#c[indices] = scipy.optimize.fsolve(ode, c[indices], fprime=odegrad, args=(0.0, Ka_n[indices], x_Ln[indices], x_R), xtol=1.0e-6, warning=False)
c[indices] = scipy.optimize.fsolve(ode, c[indices], fprime=odegrad, args=(0.0, Ka_n[indices], x_Ln[indices], x_R), xtol=1.0e-6)
C_RLn = c
return C_RLn
def expected_injection_heats(self, experiment, true_sample_cell_concentrations, true_syringe_concentrations, DeltaH_0, thermodynamic_parameters):
"""
Expected heats of injection for two-component binding model.
TODO
- Make experiment a dict, or somehow tell it how to replace members of 'experiment'?
ARGUMENTS
sample_cell_concentrations (dict of floats) - concentrations[species] is the initial concentration of species in sample cell, or zero if absent (M)
syringe_concentrations (dict of floats) - concentrations[species] is the initial concentration of species in sample cell, or zero if absent (M)
thermodynamic_parameters (dict of floats) - thermodynamic_parameters[parameter] is the value of thermodynamic parameter (kcal/mol)
e.g. for parameter 'DeltaG of receptor * species'
V_n (numpy array of floats) - V_n[n] is injection volume of injection n (L)
"""
debug = False
# Number of ligand species
nspecies = len(self.ligands)
# Compute association constants for receptor and each ligand species.
DeltaG_n = numpy.zeros([nspecies], numpy.float64) #
for (n, ligand) in enumerate(self.ligands):
name = "DeltaG of %s * %s" % (self.receptor, ligand) # determine name of free energy of binding for this ligand
DeltaG_n[n] = thermodynamic_parameters[name] # retrieve free energy of binding
Ka_n = numpy.exp(-self.beta * DeltaG_n[:]) / C0 # compute association constant (1/M)
# Compute the quantity of each species in the sample cell after each injection.
# NOTE: These quantities are correct for a perfusion-type model. This would be modified for a cumulative model.
x_Ri = numpy.zeros([experiment.ninjections], numpy.float64) # x_Ri[i] is the number of moles of receptor in sample cell after injection i
x_Lin = numpy.zeros([experiment.ninjections, nspecies], numpy.float64) # x_Lin[i,n] is the number of moles of ligand n in sample cell after injection i
dcum = 1.0 # cumulative dilution factor
for i in range(experiment.ninjections):
d = 1.0 - (experiment.injection_volumes[i] / self.V0) # dilution factor (dimensionless)
dcum *= d # cumulative dilution factor (dimensionless)
x_Ri[i] = true_sample_cell_concentrations[self.receptor] * dcum + true_syringe_concentrations[self.receptor] * (1.0 - dcum)
for (n, ligand) in enumerate(self.ligands):
x_Lin[i,n] = true_sample_cell_concentrations[ligand] * dcum + true_syringe_concentrations[ligand] * (1.0 - dcum)
# DEBUG
#print "true_sample_cell_concentrations: ", true_sample_cell_concentrations
#print "true_syringe_concentrations: ", true_syringe_concentrations
#print "x_R in mol:"
#print x_Ri
#print "x_Lin in mol: "
#print x_Lin
# Solve for initial concentration.
x_R0 = true_sample_cell_concentrations[self.receptor]
x_L0n = numpy.zeros([nspecies], numpy.float64)
C_RL0n = numpy.zeros([nspecies], numpy.float64)
for (n, ligand) in enumerate(self.ligands):
x_L0n[n] = true_sample_cell_concentrations[ligand]
C_RL0n[:] = self.equilibrium_concentrations(Ka_n, x_R0, x_L0n[:], self.V0)
#print "C_RL0n in uM:"
#print C_RL0n * 1.e6
# Compute complex concentrations after each injection.
# NOTE: The total cell volume would be modified for a cumulative model.
C_RLin = numpy.zeros([experiment.ninjections,nspecies], numpy.float64) # C_RLin[i,n] is the concentration of complex RLn[n] after injection i
for i in range(experiment.ninjections):
C_RLin[i,:] = self.equilibrium_concentrations(Ka_n, x_Ri[i], x_Lin[i,:], self.V0)
#print "C_RLin in uM:"
#print C_RLin * 1e6
# Compile a list of thermodynamic parameters.
DeltaH_n = numpy.zeros([nspecies], numpy.float64) # DeltaH_n[n] is the enthalpy of association of ligand species n
for (n, ligand) in enumerate(self.ligands):
name = "DeltaH of %s * %s" % (self.receptor, ligand)
DeltaH_n[n] = thermodynamic_parameters[name]
# Compute expected injection heats.
# NOTE: This is for an instantaneous injection / perfusion model.
q_n = DeltaH_0 * numpy.ones([experiment.ninjections], numpy.float64) # q_n_model[n] is the expected heat from injection n
d = 1.0 - (experiment.injection_volumes[0] / self.V0) # dilution factor (dimensionless)
for n in range(nspecies):
q_n[0] += (1000.0*DeltaH_n[n]) * V0 * (C_RLin[0,n] - d*C_RL0n[n]) # first injection
for i in range(1,experiment.ninjections):
d = 1.0 - (experiment.injection_volumes[i] / self.V0) # dilution factor (dimensionless)
for n in range(nspecies):
q_n[i] += (1000.0*DeltaH_n[n]) * V0 * (C_RLin[i,n] - d*C_RLin[i-1,n]) # subsequent injections
# Debug output
debug = False
if debug:
print experiment.name
print "DeltaG = ", DeltaG_n
print "DeltaH = ", DeltaH_n
print "DeltaH_0 = ", DeltaH_0
print "model: ",
for heat in q_n:
print "%6.1f" % (heat*1e6),
print ""
print "obs : ",
for heat in experiment.observed_injection_heats:
print "%6.1f" % (heat*1e6),
print ""
print ""
return q_n
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
if __name__ == "__main__":
    # Run doctests.
    import doctest
    doctest.testmod()
    #=============================================================================================
    # ABRF-MIRG'02 dataset 10
    # NOTE: this dataset is only consumed by the commented-out TwoComponentBindingModel near
    # the bottom; q_n, P0_stated, and beta are retained for that purpose.
    #=============================================================================================
    V0 = 1.4301e-3 # volume of calorimeter sample cell (L)
    V0 = V0 - 0.044e-3 # Tellinghuisen volume correction for VP-ITC (L)
    DeltaV = 8.e-6 # injection volume (L)
    P0_stated = 32.e-6 # protein stated concentration (M)
    Ls_stated = 384.e-6 # ligand syringe stated concentration (M)
    temperature = 298.15 # temperature (K)
    q_n = numpy.array([
        -13.343, -13.417, -13.279, -13.199, -13.118, -12.781, -12.600, -12.124, -11.633, -10.921, -10.009, -8.810,
        -7.661, -6.272, -5.163, -4.228, -3.519, -3.055, -2.599, -2.512, -2.197, -2.096, -2.087, -1.959, -1.776, -1.879,
        -1.894, -1.813, -1.740, -1.810]) # integrated heats of injection (kcal/mol injectant)
    q_n = q_n * DeltaV * Ls_stated * 1000.0 # convert injection heats to cal/injection
    beta = 1.0 / (kB * temperature) # inverse temperature 1/(kcal/mol)
    #=============================================================================================
    # Ernesto Freire data for HIV protease inhibitors KNI-10033 and KNI-10075
    #=============================================================================================
    experiments = list()
    #
    # acetyl pepstatin  (direct titration into HIV protease)
    #
    V0 = 1.4301e-3 # volume of calorimeter sample cell listed in manual (L)
    V0 = V0 - 0.044e-3; # Tellinghuisen volume correction for VP-ITC (L)
    sample_cell_concentrations = {'HIV protease' : 20.e-6}
    syringe_concentrations = {'acetyl pepstatin' : 300.e-6}
    Ls_stated = 300.e-6 # acetyl pepstatin concentration (M)
    DeltaV = 10.e-6 # injection volume (L)
    #injection_heats = numpy.array([1.6, 6.696, 6.695, 6.698, 6.617, 6.464, 6.336, 6.184, 5.652, 4.336, 2.970, 1.709, 0.947, 0.643, 0.441, 0.264, 0.269, 0.214, 0.138, 0.113, 0.062, 0.088, 0.016, 0.063, 0.012]) * 1000.0 * Ls_stated * DeltaV # first had to be estimated because it was omitted
    injection_heats = numpy.array([6.696, 6.695, 6.698, 6.617, 6.464, 6.336, 6.184, 5.652, 4.336, 2.970, 1.709, 0.947, 0.643, 0.441, 0.264, 0.269, 0.214, 0.138, 0.113, 0.062, 0.088, 0.016, 0.063, 0.012]) * 1000.0 * Ls_stated * DeltaV # first injection omitted
    N = len(injection_heats) # number of injections
    injection_volumes = 10.e-6 * numpy.ones([N], numpy.float64) # injection volumes (L)
    temperature = 298.15 # temperature (K)
    experiment = Experiment(sample_cell_concentrations, syringe_concentrations, injection_volumes, injection_heats, temperature)
    experiment.name = "acetyl pepstatin binding to HIV protease"
    experiment.reference = "Nature Protocols 1:186, 2006; Fig. 1, left panel"
    experiments.append(experiment)
    #
    # KNI-10033  (displacement of acetyl pepstatin)
    #
    sample_cell_concentrations = {'HIV protease' : 8.6e-6, 'acetyl pepstatin' : 510.e-6} # initial sample cell concentrations (M)
    syringe_concentrations = {'KNI-10033' : 46.e-6}
    Ls_stated = 46.e-6 # KNI-10033 syringe concentration (M)
    DeltaV = 10.e-6 # injection volume (L)
    #injection_heats = numpy.array([-12.106, -19.889, -19.896, -19.889, -19.797, -20.182, -19.889, -19.880, -19.849, -19.985, -19.716, -19.790, -19.654, -19.745, -19.622, -19.457, -19.378, -18.908, -17.964, -16.490, -12.273, -7.370, -4.649, -3.626, -3.203, -2.987, -2.841, -2.906, -2.796, -2.927]) * DeltaV * Ls_stated * 1000.0
    injection_heats = numpy.array([-19.889, -19.896, -19.889, -19.797, -20.182, -19.889, -19.880, -19.849, -19.985, -19.716, -19.790, -19.654, -19.745, -19.622, -19.457, -19.378, -18.908, -17.964, -16.490, -12.273, -7.370, -4.649, -3.626, -3.203, -2.987, -2.841, -2.906, -2.796, -2.927]) * DeltaV * Ls_stated * 1000.0
    N = len(injection_heats) # number of injections
    injection_volumes = 10.e-6 * numpy.ones([N], numpy.float64) # injection volumes (L)
    experiment = Experiment(sample_cell_concentrations, syringe_concentrations, injection_volumes, injection_heats, temperature)
    experiment.name = "KNI-10033 displacement of acetyl pepstatin binding to HIV protease"
    experiments.append(experiment)
    #
    # KNI-10075  (displacement of acetyl pepstatin)
    #
    sample_cell_concentrations = {'HIV protease' : 8.8e-6, 'acetyl pepstatin' : 510.e-6} # initial sample cell concentrations (M)
    syringe_concentrations = {'KNI-10075' : 55.e-6}
    Ls_stated = 55.e-6 # KNI-10075 syringe concentration (M)
    DeltaV = 10.e-6 # injection volume (L)
    injection_heats = numpy.array([-21.012, -22.716, -22.863, -22.632, -22.480, -22.236, -22.314, -22.569, -22.231, -22.529, -22.529, -21.773, -21.866, -21.412, -20.810, -18.664, -14.339, -11.028, -5.219, -3.612, -3.611, -3.389, -3.354, -3.122, -3.049, -3.083, -3.253, -3.089, -3.146, -3.252]) * DeltaV * Ls_stated * 1000.0
    N = len(injection_heats) # number of injections
    injection_volumes = 10.e-6 * numpy.ones([N], numpy.float64) # injection volumes (L)
    experiment = Experiment(sample_cell_concentrations, syringe_concentrations, injection_volumes, injection_heats, temperature)
    experiment.name = "KNI-10075 displacement of acetyl pepstatin binding to HIV protease"
    experiments.append(experiment)
    #=============================================================================================
    # MCMC inference
    #=============================================================================================
    #model = TwoComponentBindingModel(Ls_stated, P0_stated, q_n, DeltaV, temperature, V0)
    # Fit all three HIV protease experiments jointly with the competitive binding model.
    model = CompetitiveBindingModel(experiments, 'HIV protease', V0, verbose=True)
    niters = 10000 # number of iterations
    nburn = 1000 # number of burn-in iterations
    nthin = 1 # thinning period
    model.mcmc.sample(iter=niters, burn=nburn, thin=nthin, progress_bar=True)
    pymc.Matplot.plot(model.mcmc)
| gpl-3.0 |
mitreaadrian/Soccersim | boost/boost_1_59_0/libs/python/test/polymorphism.py | 46 | 1917 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import unittest
from polymorphism_ext import *
class PolymorphTest(unittest.TestCase):
def testReturnCpp(self):
# Python Created Object With Same Id As
# Cpp Created B Object
# b = B(872)
# Get Reference To Cpp Created B Object
a = getBCppObj()
# Python Created B Object and Cpp B Object
# Should have same result by calling f()
self.failUnlessEqual ('B::f()', a.f())
self.failUnlessEqual ('B::f()', call_f(a))
self.failUnlessEqual ('A::f()', call_f(A()))
def test_references(self):
# B is not exposed to Python
a = getBCppObj()
self.failUnlessEqual(type(a), A)
# C is exposed to Python
c = getCCppObj()
self.failUnlessEqual(type(c), C)
def test_factory(self):
self.failUnlessEqual(type(factory(0)), A)
self.failUnlessEqual(type(factory(1)), A)
self.failUnlessEqual(type(factory(2)), C)
def test_return_py(self):
class X(A):
def f(self):
return 'X.f'
x = X()
self.failUnlessEqual ('X.f', x.f())
self.failUnlessEqual ('X.f', call_f(x))
def test_wrapper_downcast(self):
a = pass_a(D())
self.failUnlessEqual('D::g()', a.g())
def test_pure_virtual(self):
p = P()
self.assertRaises(RuntimeError, p.f)
q = Q()
self.failUnlessEqual ('Q::f()', q.f())
class R(P):
def f(self):
return 'R.f'
r = R()
self.failUnlessEqual ('R.f', r.f())
if __name__ == "__main__":
    # Strip the custom '--broken-auto-ptr' flag before handing control to
    # unittest, whose command-line parser would otherwise reject it.
    import sys
    while '--broken-auto-ptr' in sys.argv:
        sys.argv.remove('--broken-auto-ptr')
    unittest.main()
| mit |
valtech-mooc/edx-platform | common/lib/xmodule/xmodule/tabs.py | 6 | 34362 | """
Implement CourseTab
"""
from abc import ABCMeta, abstractmethod
from xblock.fields import List
# We should only scrape strings for i18n in this file, since the target language is known only when
# they are rendered in the template. So ugettext gets called in the template.
# `_` is therefore a no-op marker: it tags literals for extraction by the i18n
# tooling without translating them at definition time.
_ = lambda text: text
class CourseTab(object):
    """
    The Course Tab class is a data abstraction for all tabs (i.e., course navigation links) within a course.
    It is an abstract class - to be inherited by various tab types.
    Derived classes are expected to override methods as needed.
    When a new tab class is created, it should define the type and add it in this class' factory method.
    """
    __metaclass__ = ABCMeta
    # Class property that specifies the type of the tab. It is generally a constant value for a
    # subclass, shared by all instances of the subclass.
    type = ''
    # Class property that specifies whether the tab can be hidden for a particular course
    is_hideable = False
    # Class property that specifies whether the tab can be moved within a course's list of tabs
    is_movable = True
    # Class property that specifies whether the tab is a collection of other tabs
    is_collection = False
    def __init__(self, name, tab_id, link_func):
        """
        Initializes class members with values passed in by subclasses.
        Args:
            name: The name of the tab
            tab_id: Intended to be a unique id for this tab, although it is currently not enforced
                within this module.  It is used by the UI to determine which page is active.
            link_func: A function that computes the link for the tab,
                given the course and a reverse-url function as input parameters
        """
        self.name = name
        self.tab_id = tab_id
        self.link_func = link_func
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):  # pylint: disable=unused-argument
        """
        Determines whether the tab should be displayed in the UI for the given course and a particular user.
        This method is to be overridden by subclasses when applicable.  The base class implementation
        always returns True.
        Args:
            course: An xModule CourseDescriptor
            settings: The configuration settings, including values for:
             WIKI_ENABLED
             FEATURES['ENABLE_DISCUSSION_SERVICE']
             FEATURES['ENABLE_EDXNOTES']
             FEATURES['ENABLE_STUDENT_NOTES']
             FEATURES['ENABLE_TEXTBOOK']
            is_user_authenticated: Indicates whether the user is authenticated.  If the tab is of
             type AuthenticatedCourseTab and this value is False, then can_display will return False.
            is_user_staff: Indicates whether the user has staff access to the course.  If the tab is of
             type StaffTab and this value is False, then can_display will return False.
            is_user_enrolled: Indicates whether the user is enrolled in the course
        Returns:
            A boolean value to indicate whether this instance of the tab should be displayed to a
            given user for the given course.
        """
        return True
    def get(self, key, default=None):
        """
        Akin to the get method on Python dictionary objects, gracefully returns the value associated with the
        given key, or the default if key does not exist.
        """
        try:
            return self[key]
        except KeyError:
            return default
    def __getitem__(self, key):
        """
        This method allows callers to access CourseTab members with the d[key] syntax as is done with
        Python dictionary objects.
        """
        # Only 'name', 'type', and 'tab_id' are exposed through the dict interface;
        # subclasses extend this for their additional members (e.g. 'is_hidden').
        if key == 'name':
            return self.name
        elif key == 'type':
            return self.type
        elif key == 'tab_id':
            return self.tab_id
        else:
            raise KeyError('Key {0} not present in tab {1}'.format(key, self.to_json()))
    def __setitem__(self, key, value):
        """
        This method allows callers to change CourseTab members with the d[key]=value syntax as is done with
        Python dictionary objects.  For example: course_tab['name'] = new_name
        Note: the 'type' member can be 'get', but not 'set'.
        """
        if key == 'name':
            self.name = value
        elif key == 'tab_id':
            self.tab_id = value
        else:
            raise KeyError('Key {0} cannot be set in tab {1}'.format(key, self.to_json()))
    def __eq__(self, other):
        """
        Overrides the equal operator to check equality of member variables rather than the object's address.
        Also allows comparison with dict-type tabs (needed to support callers implemented before this class
        was implemented).
        """
        # An invalid dict-type tab can never be equal to a CourseTab instance.
        if isinstance(other, dict) and not self.validate(other, raise_error=False):
            # 'other' is a dict-type tab and did not validate
            return False
        # allow tabs without names; if a name is required, its presence was checked in the validator.
        name_is_eq = (other.get('name') is None or self.name == other['name'])
        # only compare the persisted/serialized members: 'type' and 'name'
        return self.type == other.get('type') and name_is_eq
    def __ne__(self, other):
        """
        Overrides the not equal operator as a partner to the equal operator.
        """
        return not (self == other)
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """
        Validates the given dict-type tab object to ensure it contains the expected keys.
        This method should be overridden by subclasses that require certain keys to be persisted in the tab.
        """
        return key_checker(['type'])(tab_dict, raise_error)
    def to_json(self):
        """
        Serializes the necessary members of the CourseTab object to a json-serializable representation.
        This method is overridden by subclasses that have more members to serialize.
        Returns:
            a dictionary with keys for the properties of the CourseTab object.
        """
        return {'type': self.type, 'name': self.name}
    @staticmethod
    def from_json(tab_dict):
        """
        Deserializes a CourseTab from a json-like representation.
        The subclass that is instantiated is determined by the value of the 'type' key in the
        given dict-type tab.  The given dict-type tab is validated before instantiating the CourseTab object.
        Args:
            tab: a dictionary with keys for the properties of the tab.
        Raises:
            InvalidTabsException if the given tab doesn't have the right keys.
        """
        # NOTE: this registry must be kept in sync with the CourseTab subclasses
        # defined in this module; an unregistered type raises InvalidTabsException.
        sub_class_types = {
            'courseware': CoursewareTab,
            'course_info': CourseInfoTab,
            'wiki': WikiTab,
            'discussion': DiscussionTab,
            'external_discussion': ExternalDiscussionTab,
            'external_link': ExternalLinkTab,
            'textbooks': TextbookTabs,
            'pdf_textbooks': PDFTextbookTabs,
            'html_textbooks': HtmlTextbookTabs,
            'progress': ProgressTab,
            'static_tab': StaticTab,
            'peer_grading': PeerGradingTab,
            'staff_grading': StaffGradingTab,
            'open_ended': OpenEndedGradingTab,
            'notes': NotesTab,
            'edxnotes': EdxNotesTab,
            'syllabus': SyllabusTab,
            'instructor': InstructorTab,  # not persisted
        }
        tab_type = tab_dict.get('type')
        if tab_type not in sub_class_types:
            raise InvalidTabsException(
                'Unknown tab type {0}. Known types: {1}'.format(tab_type, sub_class_types)
            )
        tab_class = sub_class_types[tab_dict['type']]
        tab_class.validate(tab_dict)
        return tab_class(tab_dict=tab_dict)
class AuthenticatedCourseTab(CourseTab):
    """
    Abstract base for tabs that only authenticated (logged-in) users may see.
    """
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        # Visibility hinges solely on the authentication flag; course,
        # settings, and the role flags are ignored here.
        return is_user_authenticated
class StaffTab(AuthenticatedCourseTab):
    """
    Abstract base for tabs visible only to users with staff access to the course.
    """
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):  # pylint: disable=unused-argument
        # Staff access is the sole gate; authentication is implied by staff status.
        return is_user_staff
class EnrolledOrStaffTab(CourseTab):
    """
    Abstract base for tabs reserved for course staff or enrolled students.
    """
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):  # pylint: disable=unused-argument
        # The user must be logged in, and beyond that either staff access
        # or enrollment suffices.
        authenticated_gate = is_user_authenticated
        role_gate = is_user_staff or is_user_enrolled
        return authenticated_gate and role_gate
class HideableTab(CourseTab):
    """
    Abstract base for tabs whose visibility can be toggled via an 'is_hidden'
    flag carried in the serialized tab dict.
    """
    is_hideable = True

    def __init__(self, name, tab_id, link_func, tab_dict):
        super(HideableTab, self).__init__(
            name=name,
            tab_id=tab_id,
            link_func=link_func,
        )
        # Default to visible when no dict (or no flag) was supplied.
        self.is_hidden = tab_dict.get('is_hidden', False) if tab_dict else False

    def __getitem__(self, key):
        if key != 'is_hidden':
            return super(HideableTab, self).__getitem__(key)
        return self.is_hidden

    def __setitem__(self, key, value):
        if key != 'is_hidden':
            super(HideableTab, self).__setitem__(key, value)
        else:
            self.is_hidden = value

    def to_json(self):
        serialized = super(HideableTab, self).to_json()
        # Only persist the flag when it is actually set.
        if self.is_hidden:
            serialized['is_hidden'] = True
        return serialized

    def __eq__(self, other):
        if not super(HideableTab, self).__eq__(other):
            return False
        return self.is_hidden == other.get('is_hidden', False)
class CoursewareTab(EnrolledOrStaffTab):
    """
    A tab containing the course content.
    Visible only to staff or enrolled users (via EnrolledOrStaffTab) and pinned
    in place (is_movable is False).
    """
    type = 'courseware'
    is_movable = False
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(CoursewareTab, self).__init__(
            # Translators: 'Courseware' refers to the tab in the courseware that leads to the content of a course
            name=_('Courseware'),  # support fixed name for the courseware tab
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class CourseInfoTab(CourseTab):
    """
    A tab containing information about the course.
    Pinned in place (is_movable is False); serialized dicts must carry a
    'name' key (see validate/need_name).
    """
    type = 'course_info'
    is_movable = False
    def __init__(self, tab_dict=None):
        super(CourseInfoTab, self).__init__(
            # Translators: "Course Info" is the name of the course's information and updates page
            name=tab_dict['name'] if tab_dict else _('Course Info'),
            tab_id='info',  # note: tab_id is 'info', not the 'course_info' type string
            link_func=link_reverse_func('info'),
        )
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        # Valid only when base validation passes and a 'name' key is present.
        return super(CourseInfoTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class ProgressTab(EnrolledOrStaffTab):
    """
    A tab containing information about the authenticated user's progress.
    """
    type = 'progress'

    def __init__(self, tab_dict=None):
        super(ProgressTab, self).__init__(
            # Translators: "Progress" is the name of the student's course progress page
            name=tab_dict['name'] if tab_dict else _('Progress'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )

    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        # A course may opt out of showing progress entirely.
        if course.hide_progress_tab:
            return False
        return super(ProgressTab, self).can_display(
            course, settings, is_user_authenticated, is_user_staff, is_user_enrolled
        )

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        return super(ProgressTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class WikiTab(HideableTab):
    """
    A hideable tab that links to the course wiki.
    """
    type = 'wiki'

    def __init__(self, tab_dict=None):
        super(WikiTab, self).__init__(
            # Translators: "Wiki" is the name of the course's wiki page
            name=tab_dict['name'] if tab_dict else _('Wiki'),
            tab_id=self.type,
            link_func=link_reverse_func('course_wiki'),
            tab_dict=tab_dict,
        )

    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        # The wiki feature must be on platform-wide before per-course rules apply.
        if not settings.WIKI_ENABLED:
            return False
        return course.allow_public_wiki_access or is_user_enrolled or is_user_staff

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        return super(WikiTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class DiscussionTab(EnrolledOrStaffTab):
    """
    A tab only for the new Berkeley discussion forums.
    """
    type = 'discussion'

    def __init__(self, tab_dict=None):
        super(DiscussionTab, self).__init__(
            # Translators: "Discussion" is the title of the course forum page
            name=tab_dict['name'] if tab_dict else _('Discussion'),
            tab_id=self.type,
            link_func=link_reverse_func('django_comment_client.forum.views.forum_form_discussion'),
        )

    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        # The discussion service feature flag gates everything else.
        if not settings.FEATURES.get('ENABLE_DISCUSSION_SERVICE'):
            return False
        return super(DiscussionTab, self).can_display(
            course, settings, is_user_authenticated, is_user_staff, is_user_enrolled
        )

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        return super(DiscussionTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class LinkTab(CourseTab):
    """
    Abstract class for tabs whose destination is an arbitrary stored link.
    The link is exposed through dict-style access under the 'link' key and is
    included in the serialized form.
    """
    link_value = ''

    def __init__(self, name, tab_id, link_value):
        self.link_value = link_value
        super(LinkTab, self).__init__(
            name=name,
            tab_id=tab_id,
            link_func=link_value_func(self.link_value),
        )

    def __getitem__(self, key):
        if key != 'link':
            return super(LinkTab, self).__getitem__(key)
        return self.link_value

    def __setitem__(self, key, value):
        if key != 'link':
            super(LinkTab, self).__setitem__(key, value)
        else:
            self.link_value = value

    def to_json(self):
        serialized = super(LinkTab, self).to_json()
        serialized['link'] = self.link_value
        return serialized

    def __eq__(self, other):
        if not super(LinkTab, self).__eq__(other):
            return False
        return self.link_value == other.get('link')

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        return super(LinkTab, cls).validate(tab_dict, raise_error) and key_checker(['link'])(tab_dict, raise_error)
class ExternalDiscussionTab(LinkTab):
    """
    A tab that links to an external discussion service.
    Shares the 'Discussion' display name and 'discussion' tab_id with the
    built-in DiscussionTab, but resolves to a fixed external link.
    """
    type = 'external_discussion'
    def __init__(self, tab_dict=None, link_value=None):
        super(ExternalDiscussionTab, self).__init__(
            # Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
            name=_('Discussion'),
            tab_id='discussion',
            # Prefer the serialized 'link' when a dict is given, otherwise the
            # explicit link_value argument.
            link_value=tab_dict['link'] if tab_dict else link_value,
        )
class ExternalLinkTab(LinkTab):
    """
    A tab containing an external link.
    Requires a tab_dict; its 'name' and 'link' keys are read directly.
    """
    type = 'external_link'
    def __init__(self, tab_dict):
        super(ExternalLinkTab, self).__init__(
            name=tab_dict['name'],
            tab_id=None,  # External links are never active.
            link_value=tab_dict['link'],
        )
class StaticTab(CourseTab):
    """
    A custom (author-defined) course tab backed by a static page identified by
    its url_slug. The slug is exposed via dict-style access and serialization.
    """
    type = 'static_tab'

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        return super(StaticTab, cls).validate(tab_dict, raise_error) and key_checker(['name', 'url_slug'])(tab_dict, raise_error)

    def __init__(self, tab_dict=None, name=None, url_slug=None):
        # Values from the serialized dict win over the keyword arguments.
        self.url_slug = tab_dict['url_slug'] if tab_dict else url_slug
        super(StaticTab, self).__init__(
            name=tab_dict['name'] if tab_dict else name,
            tab_id='static_tab_{0}'.format(self.url_slug),
            link_func=lambda course, reverse_func: reverse_func(self.type, args=[course.id.to_deprecated_string(), self.url_slug]),
        )

    def __getitem__(self, key):
        if key != 'url_slug':
            return super(StaticTab, self).__getitem__(key)
        return self.url_slug

    def __setitem__(self, key, value):
        if key != 'url_slug':
            super(StaticTab, self).__setitem__(key, value)
        else:
            self.url_slug = value

    def to_json(self):
        serialized = super(StaticTab, self).to_json()
        serialized['url_slug'] = self.url_slug
        return serialized

    def __eq__(self, other):
        if not super(StaticTab, self).__eq__(other):
            return False
        return self.url_slug == other.get('url_slug')
class SingleTextbookTab(CourseTab):
    """
    A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
    Textbook collection tab. It should not be serialized or persisted.
    """
    type = 'single_textbook'
    is_movable = False
    is_collection_item = True
    def to_json(self):
        # Transient object: serialization is intentionally unsupported.
        raise NotImplementedError('SingleTextbookTab should not be serialized.')
class TextbookTabsBase(AuthenticatedCourseTab):
    """
    Abstract class for textbook collection tabs classes.
    Subclasses must implement items() to enumerate the SingleTextbookTab
    entries the collection contains.
    """
    is_collection = True
    def __init__(self, tab_id):
        # Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
        super(TextbookTabsBase, self).__init__(
            name=_("Textbooks"),
            tab_id=tab_id,
            link_func=None,  # the collection itself has no link; items carry them
        )
    @abstractmethod
    def items(self, course):
        """
        A generator for iterating through all the SingleTextbookTab book objects associated with this
        collection of textbooks.
        """
        pass
class TextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all textbook tabs.
    """
    type = 'textbooks'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(TextbookTabs, self).__init__(
            tab_id=self.type,
        )
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        # Gated by the platform-wide textbook feature flag.
        return settings.FEATURES.get('ENABLE_TEXTBOOK')
    def items(self, course):
        for index, textbook in enumerate(course.textbooks):
            yield SingleTextbookTab(
                name=textbook.title,
                tab_id='textbook/{0}'.format(index),
                # index=index captures the current loop value as a default
                # argument, avoiding the late-binding closure pitfall.
                link_func=lambda course, reverse_func, index=index: reverse_func(
                    'book', args=[course.id.to_deprecated_string(), index]
                ),
            )
class PDFTextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all PDF textbook tabs.
    Always visible to authenticated users (no feature flag, unlike TextbookTabs).
    """
    type = 'pdf_textbooks'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(PDFTextbookTabs, self).__init__(
            tab_id=self.type,
        )
    def items(self, course):
        for index, textbook in enumerate(course.pdf_textbooks):
            yield SingleTextbookTab(
                name=textbook['tab_title'],
                tab_id='pdftextbook/{0}'.format(index),
                # index=index captures the current loop value as a default
                # argument, avoiding the late-binding closure pitfall.
                link_func=lambda course, reverse_func, index=index: reverse_func(
                    'pdf_book', args=[course.id.to_deprecated_string(), index]
                ),
            )
class HtmlTextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all Html textbook tabs.
    Always visible to authenticated users (no feature flag, unlike TextbookTabs).
    """
    type = 'html_textbooks'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(HtmlTextbookTabs, self).__init__(
            tab_id=self.type,
        )
    def items(self, course):
        for index, textbook in enumerate(course.html_textbooks):
            yield SingleTextbookTab(
                name=textbook['tab_title'],
                tab_id='htmltextbook/{0}'.format(index),
                # index=index captures the current loop value as a default
                # argument, avoiding the late-binding closure pitfall.
                link_func=lambda course, reverse_func, index=index: reverse_func(
                    'html_book', args=[course.id.to_deprecated_string(), index]
                ),
            )
class GradingTab(object):
    """
    Abstract class for tabs that involve Grading.
    Marker mixin only; it defines no behavior of its own.
    """
    pass
class StaffGradingTab(StaffTab, GradingTab):
    """
    A tab for staff grading.
    Visible only to staff (via StaffTab); carries the GradingTab marker mixin.
    """
    type = 'staff_grading'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(StaffGradingTab, self).__init__(
            # Translators: "Staff grading" appears on a tab that allows
            # staff to view open-ended problems that require staff grading
            name=_("Staff grading"),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class PeerGradingTab(AuthenticatedCourseTab, GradingTab):
    """
    A tab for peer grading.
    Visible to any authenticated user; carries the GradingTab marker mixin.
    """
    type = 'peer_grading'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(PeerGradingTab, self).__init__(
            # Translators: "Peer grading" appears on a tab that allows
            # students to view open-ended problems that require grading
            name=_("Peer grading"),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class OpenEndedGradingTab(AuthenticatedCourseTab, GradingTab):
    """
    A tab for open ended grading.
    Visible to any authenticated user; carries the GradingTab marker mixin.
    """
    type = 'open_ended'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(OpenEndedGradingTab, self).__init__(
            # Translators: "Open Ended Panel" appears on a tab that, when clicked, opens up a panel that
            # displays information about open-ended problems that a user has submitted or needs to grade
            name=_("Open Ended Panel"),
            tab_id=self.type,
            # note: reverses 'open_ended_notifications', not the tab's own type
            link_func=link_reverse_func('open_ended_notifications'),
        )
class SyllabusTab(CourseTab):
    """
    A tab for the course syllabus.
    """
    type = 'syllabus'

    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        # Shown only when the course declares a syllabus.
        if not hasattr(course, 'syllabus_present'):
            return False
        return course.syllabus_present

    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(SyllabusTab, self).__init__(
            # Translators: "Syllabus" appears on a tab that, when clicked, opens the syllabus of the course.
            name=_('Syllabus'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class NotesTab(AuthenticatedCourseTab):
    """
    A tab for the course notes.
    Shown only when the ENABLE_STUDENT_NOTES feature flag is set.
    """
    type = 'notes'
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        return settings.FEATURES.get('ENABLE_STUDENT_NOTES')
    def __init__(self, tab_dict=None):
        super(NotesTab, self).__init__(
            # Fall back to a translated default so NotesTab() does not raise
            # TypeError when constructed without a dict (the declared default),
            # matching the sibling classes (e.g. EdxNotesTab, CourseInfoTab).
            name=tab_dict['name'] if tab_dict else _('Notes'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        # Serialized dicts must still carry an explicit 'name' key.
        return super(NotesTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class EdxNotesTab(AuthenticatedCourseTab):
    """
    A tab for the course student notes.
    Shown only when the ENABLE_EDXNOTES feature flag is set.
    """
    type = 'edxnotes'
    def can_display(self, course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
        return settings.FEATURES.get('ENABLE_EDXNOTES')
    def __init__(self, tab_dict=None):
        super(EdxNotesTab, self).__init__(
            name=tab_dict['name'] if tab_dict else _('Notes'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        # Valid only when base validation passes and a 'name' key is present.
        return super(EdxNotesTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class InstructorTab(StaffTab):
    """
    A tab for the course instructors.
    Staff-only; appended dynamically by CourseTabList.iterate_displayable
    rather than being persisted in course.tabs.
    """
    type = 'instructor'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(InstructorTab, self).__init__(
            # Translators: 'Instructor' appears on the tab that leads to the instructor dashboard, which is
            # a portal where an instructor can get data and perform various actions on their course
            name=_('Instructor'),
            tab_id=self.type,
            link_func=link_reverse_func('instructor_dashboard'),
        )
class CourseTabList(List):
    """
    An XBlock field class that encapsulates a collection of Tabs in a course.
    It is automatically created and can be retrieved through a CourseDescriptor object: course.tabs
    """
    @staticmethod
    def initialize_default(course):
        """
        An explicit initialize method is used to set the default values, rather than implementing an
        __init__ method. This is because the default values are dependent on other information from
        within the course.
        """
        # Courseware and Course Info must come first, in that order
        # (enforced by validate_tabs below).
        course.tabs.extend([
            CoursewareTab(),
            CourseInfoTab(),
        ])
        # Presence of syllabus tab is indicated by a course attribute
        if hasattr(course, 'syllabus_present') and course.syllabus_present:
            course.tabs.append(SyllabusTab())
        # If the course has a discussion link specified, use that even if we feature
        # flag discussions off. Disabling that is mostly a server safety feature
        # at this point, and we don't need to worry about external sites.
        if course.discussion_link:
            discussion_tab = ExternalDiscussionTab(link_value=course.discussion_link)
        else:
            discussion_tab = DiscussionTab()
        course.tabs.extend([
            TextbookTabs(),
            discussion_tab,
            WikiTab(),
            ProgressTab(),
        ])
    @staticmethod
    def get_discussion(course):
        """
        Returns the discussion tab for the given course. It can be either of type DiscussionTab
        or ExternalDiscussionTab. The returned tab object is self-aware of the 'link' that it corresponds to.
        """
        # the discussion_link setting overrides everything else, even if there is a discussion tab in the course tabs
        if course.discussion_link:
            return ExternalDiscussionTab(link_value=course.discussion_link)
        # find one of the discussion tab types in the course tabs
        for tab in course.tabs:
            if isinstance(tab, DiscussionTab) or isinstance(tab, ExternalDiscussionTab):
                return tab
        return None
    @staticmethod
    def get_tab_by_slug(tab_list, url_slug):
        """
        Look for a tab with the specified 'url_slug'. Returns the tab or None if not found.
        """
        return next((tab for tab in tab_list if tab.get('url_slug') == url_slug), None)
    @staticmethod
    def get_tab_by_type(tab_list, tab_type):
        """
        Look for a tab with the specified type. Returns the first matching tab, or None.
        """
        return next((tab for tab in tab_list if tab.type == tab_type), None)
    @staticmethod
    def get_tab_by_id(tab_list, tab_id):
        """
        Look for a tab with the specified tab_id. Returns the first matching tab, or None.
        """
        return next((tab for tab in tab_list if tab.tab_id == tab_id), None)
    @staticmethod
    def iterate_displayable(
            course,
            settings,
            is_user_authenticated=True,
            is_user_staff=True,
            is_user_enrolled=False
    ):
        """
        Generator method for iterating through all tabs that can be displayed for the given course and
        the given user with the provided access settings.
        """
        for tab in course.tabs:
            # Skip tabs the user may not see and tabs explicitly hidden.
            if tab.can_display(
                    course, settings, is_user_authenticated, is_user_staff, is_user_enrolled
            ) and (not tab.is_hideable or not tab.is_hidden):
                if tab.is_collection:
                    # Collections (e.g. textbooks) expand into their items.
                    for item in tab.items(course):
                        yield item
                else:
                    yield tab
        # The Instructor tab is not persisted in course.tabs; it is appended
        # dynamically for users who may see it.
        instructor_tab = InstructorTab()
        if instructor_tab.can_display(course, settings, is_user_authenticated, is_user_staff, is_user_enrolled):
            yield instructor_tab
    @staticmethod
    def iterate_displayable_cms(
            course,
            settings
    ):
        """
        Generator method for iterating through all tabs that can be displayed for the given course
        with the provided settings.
        """
        for tab in course.tabs:
            # Studio acts as an authenticated, staff, enrolled user.
            if tab.can_display(course, settings, is_user_authenticated=True, is_user_staff=True, is_user_enrolled=True):
                if tab.is_collection and not len(list(tab.items(course))):
                    # do not yield collections that have no items
                    continue
                yield tab
    @classmethod
    def validate_tabs(cls, tabs):
        """
        Check that the tabs set for the specified course is valid. If it
        isn't, raise InvalidTabsException with the complaint.
        Specific rules checked:
        - if no tabs specified, that's fine
        - if tabs specified, first two must have type 'courseware' and 'course_info', in that order.
        """
        if tabs is None or len(tabs) == 0:
            return
        if len(tabs) < 2:
            raise InvalidTabsException("Expected at least two tabs. tabs: '{0}'".format(tabs))
        if tabs[0].get('type') != CoursewareTab.type:
            raise InvalidTabsException(
                "Expected first tab to have type 'courseware'. tabs: '{0}'".format(tabs))
        if tabs[1].get('type') != CourseInfoTab.type:
            raise InvalidTabsException(
                "Expected second tab to have type 'course_info'. tabs: '{0}'".format(tabs))
        # the following tabs should appear only once
        for tab_type in [
                CoursewareTab.type,
                CourseInfoTab.type,
                NotesTab.type,
                TextbookTabs.type,
                PDFTextbookTabs.type,
                HtmlTextbookTabs.type,
                EdxNotesTab.type]:
            cls._validate_num_tabs_of_type(tabs, tab_type, 1)
    @staticmethod
    def _validate_num_tabs_of_type(tabs, tab_type, max_num):
        """
        Check that the number of times that the given 'tab_type' appears in 'tabs' is less than or equal to 'max_num'.
        """
        count = sum(1 for tab in tabs if tab.get('type') == tab_type)
        if count > max_num:
            msg = (
                "Tab of type '{type}' appears {count} time(s). "
                "Expected maximum of {max} time(s)."
            ).format(
                type=tab_type, count=count, max=max_num,
            )
            raise InvalidTabsException(msg)
    def to_json(self, values):
        """
        Overrides the to_json method to serialize all the CourseTab objects to a json-serializable representation.
        """
        json_data = []
        if values:
            for val in values:
                if isinstance(val, CourseTab):
                    json_data.append(val.to_json())
                elif isinstance(val, dict):
                    # already-serialized entries pass through unchanged
                    json_data.append(val)
                else:
                    # silently drop anything that is neither a tab nor a dict
                    continue
        return json_data
    def from_json(self, values):
        """
        Overrides the from_json method to de-serialize the CourseTab objects from a json-like representation.
        """
        self.validate_tabs(values)
        return [CourseTab.from_json(tab_dict) for tab_dict in values]
#### Link Functions
def link_reverse_func(reverse_name):
    """
    Build a link function for tabs resolved via URL reversal.

    The returned callable accepts (course, reverse_url_func) and invokes
    reverse_url_func with the given reverse_name and the course's id.
    """
    def link_func(course, reverse_url_func):
        return reverse_url_func(reverse_name, args=[course.id.to_deprecated_string()])
    return link_func
def link_value_func(value):
    """
    Build a link function that ignores its inputs and returns the fixed value.
    """
    def link_func(course, reverse_url_func):
        return value
    return link_func
#### Validators
# A validator takes a dict and raises InvalidTabsException if required fields are missing or otherwise wrong.
# (e.g. "is there a 'name' field?). Validators can assume that the type field is valid.
def key_checker(expected_keys):
    """
    Build a validator that checks a dict for the presence of expected_keys.
    """
    def check(actual_dict, raise_error=True):
        """
        Return True when every expected key is present in actual_dict.
        Otherwise raise InvalidTabsException (if raise_error) or return False.
        """
        if all(key in actual_dict for key in expected_keys):
            return True
        if raise_error:
            raise InvalidTabsException(
                "Expected keys '{0}' are not present in the given dict: {1}".format(expected_keys, actual_dict)
            )
        return False
    return check
def need_name(dictionary, raise_error=True):
    """
    Return whether the given dictionary contains a 'name' key.

    Delegates to key_checker, so a missing name raises InvalidTabsException
    unless raise_error is False.
    """
    check_name = key_checker(['name'])
    return check_name(dictionary, raise_error)
class InvalidTabsException(Exception):
    """
    A complaint about invalid tabs: raised for unknown tab types, missing
    required keys, or a tab list whose leading tabs are wrong (see
    CourseTabList.validate_tabs and key_checker).
    """
    pass
class UnequalTabsException(Exception):
    """
    A complaint about tab lists being unequal.
    Not raised within this module; presumably used by callers comparing
    tab lists — TODO confirm.
    """
    pass
| agpl-3.0 |
kylazhang/virt-test | qemu/tests/migration_multi_host_with_speed_measurement.py | 2 | 8531 | import os
import re
import logging
import time
import socket
from autotest.client.shared import error, utils
from autotest.client.shared.barrier import listen_server
from autotest.client.shared.syncdata import SyncData
from virttest import utils_test, utils_misc
def run_migration_multi_host_with_speed_measurement(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration. If it does, proceed with
       the test.
    3) Start memory load in vm.
    4) Set defined migration speed.
    5) Send a migration command to the source VM and collecting statistic
       of migration speed.
    !) Checks that migration utilisation didn't slow down in guest stresser
       which would lead to less page-changes than required for this test.
       (migration speed is set too high for current CPU)
    6) Kill both VMs.
    7) Print statistic of migration.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    # Select the multi-host migration base class matching the transport.
    mig_protocol = params.get("mig_protocol", "tcp")
    base_class = utils_test.qemu.MultihostMigration
    if mig_protocol == "fd":
        base_class = utils_test.qemu.MultihostMigrationFd
    if mig_protocol == "exec":
        base_class = utils_test.qemu.MultihostMigrationExec
    install_path = params.get("cpuflags_install_path", "/tmp")
    vm_mem = int(params.get("mem", "512"))
    # Parses the text form of "info migrate"; the transferred count is
    # reported in kbytes.
    get_mig_speed = re.compile("^transferred ram: (\d+) kbytes$",
                               re.MULTILINE)
    mig_speed = params.get("mig_speed", "1G")
    mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2"))
    def get_migration_statistic(vm):
        """
        Sample "info migrate" once per second for 30 seconds and record the
        per-second transfer rate (MB/s) into a utils.Statistic object.
        """
        last_transfer_mem = 0
        transfered_mem = 0
        mig_stat = utils.Statistic()
        for _ in range(30):
            o = vm.monitor.info("migrate")
            warning_msg = ("Migration already ended. Migration speed is"
                           " probably too high and will block vm while"
                           " filling its memory.")
            fail_msg = ("Could not determine the transferred memory from"
                        " monitor data: %s" % o)
            # Human monitor returns a string; QMP returns a dict.
            if isinstance(o, str):
                if not "status: active" in o:
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = int(get_mig_speed.search(o).groups()[0])
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)
            else:
                if o.get("status") != "active":
                    raise error.TestWarn(warning_msg)
                try:
                    # QMP value presumably in bytes, scaled to kbytes here
                    # to match the human-monitor branch — TODO confirm.
                    transfered_mem = o.get("ram").get("transferred") / (1024)
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)
            # kbytes delta over a ~1s interval -> MB/s sample.
            real_mig_speed = (transfered_mem - last_transfer_mem) / 1024
            last_transfer_mem = transfered_mem
            logging.debug("Migration speed: %s MB/s" % (real_mig_speed))
            mig_stat.record(real_mig_speed)
            time.sleep(1)
        return mig_stat
    class TestMultihostMigration(base_class):
        def __init__(self, test, params, env):
            super(TestMultihostMigration, self).__init__(test, params, env)
            self.mig_stat = None
            self.srchost = self.params.get("hosts")[0]
            self.dsthost = self.params.get("hosts")[1]
            # Identifier shared by both hosts for barrier synchronization.
            self.id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "speed_measurement"}
            self.link_speed = 0
        def check_vms(self, mig_data):
            """
            Check vms after migrate.
            :param mig_data: object with migration data.
            """
            # Intentionally a no-op: this test only measures speed.
            pass
        def migrate_vms_src(self, mig_data):
            """
            Migrate vms source.
            :param mig_data: Data for migration.
            To change the way a machine migrates it is necessary to
            re-implement this method.
            """
            super_cls = super(TestMultihostMigration, self)
            super_cls.migrate_vms_src(mig_data)
            vm = mig_data.vms[0]
            # Collect per-second speed samples while the migration runs.
            self.mig_stat = get_migration_statistic(vm)
        def migration_scenario(self):
            sync = SyncData(self.master_id(), self.hostid, self.hosts,
                            self.id, self.sync_server)
            srchost = self.params.get("hosts")[0]
            dsthost = self.params.get("hosts")[1]
            vms = [params.get("vms").split()[0]]
            def worker(mig_data):
                # Start a memory stresser inside the guest so pages keep
                # changing during migration.
                vm = mig_data.vms[0]
                session = vm.wait_for_login(timeout=self.login_timeout)
                utils_misc.install_cpuflags_util_on_vm(test, vm, install_path,
                                                       extra_flags="-msse3 -msse2")
                cmd = ("%s/cpuflags-test --stressmem %d,%d" %
                       (os.path.join(install_path, "test_cpu_flags"),
                        vm_mem * 4, vm_mem / 2))
                logging.debug("Sending command: %s" % (cmd))
                session.sendline(cmd)
            if self.master_id() == self.hostid:
                # Master side: receive data for 30s to estimate link speed.
                server_port = utils_misc.find_free_port(5200, 6000)
                server = listen_server(port=server_port)
                data_len = 0
                sync.sync(server_port, timeout=120)
                client = server.socket.accept()[0]
                endtime = time.time() + 30
                while endtime > time.time():
                    data_len += len(client.recv(2048))
                client.close()
                server.close()
                self.link_speed = data_len / (30 * 1024 * 1024)
                logging.info("Link speed %d MB/s" % (self.link_speed))
                ms = utils.convert_data_size(mig_speed, 'M')
                if (ms > data_len / 30):
                    logging.warn("Migration speed %s MB/s is set faster than "
                                 "real link speed %d MB/s" % (mig_speed,
                                                              self.link_speed))
                else:
                    self.link_speed = ms / (1024 * 1024)
            else:
                # Slave side: blast data at the master for ~10s.
                data = ""
                for _ in range(10000):
                    data += "i"
                server_port = sync.sync(timeout=120)[self.master_id()]
                sock = socket.socket(socket.AF_INET,
                                     socket.SOCK_STREAM)
                sock.connect((self.master_id(), server_port))
                try:
                    endtime = time.time() + 10
                    while endtime > time.time():
                        sock.sendall(data)
                    sock.close()
                # NOTE(review): bare except silently swallows every error
                # (including KeyboardInterrupt) during the bandwidth probe;
                # narrowing to socket.error would be safer.
                except:
                    pass
            self.migrate_wait(vms, srchost, dsthost, worker)
    mig = TestMultihostMigration(test, params, env)
    # Start migration
    mig.run()
    # If machine is migration master check migration statistic.
    if mig.master_id() == mig.hostid:
        mig_speed = utils.convert_data_size(mig_speed, "M")
        mig_stat = mig.mig_stat
        mig_speed = mig_speed / (1024 * 1024)
        real_speed = mig_stat.get_average()
        # Allowed deviation is a fraction of the measured link speed.
        ack_speed = mig.link_speed * mig_speed_accuracy
        logging.info("Target migration speed: %d MB/s", mig_speed)
        logging.info("Real Link speed: %d MB/s", mig.link_speed)
        logging.info(
            "Average migration speed: %d MB/s", mig_stat.get_average())
        logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min())
        logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max())
        logging.info("Maximum tolerable divergence: %3.1f%%",
                     mig_speed_accuracy * 100)
        if real_speed < mig_speed - ack_speed:
            divergence = (1 - float(real_speed) / float(mig_speed)) * 100
            raise error.TestWarn("Average migration speed (%s MB/s) "
                                 "is %3.1f%% lower than target (%s MB/s)" %
                                 (real_speed, divergence, mig_speed))
        if real_speed > mig_speed + ack_speed:
            divergence = (1 - float(mig_speed) / float(real_speed)) * 100
            raise error.TestWarn("Average migration speed (%s MB/s) "
                                 "is %3.1f %% higher than target (%s MB/s)" %
                                 (real_speed, divergence, mig_speed))
| gpl-2.0 |
thnee/ansible | lib/ansible/modules/network/fortimanager/fmgr_script.py | 39 | 8637 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_script
version_added: "2.5"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author: Andrew Welsh (@Ghilli3)
short_description: Add/Edit/Delete and execute scripts
description: Create/edit/delete scripts and execute the scripts on the FortiManager using jsonrpc API
options:
adom:
description:
- The administrative domain (admon) the configuration belongs to
required: true
vdom:
description:
- The virtual domain (vdom) the configuration belongs to
mode:
description:
- The desired mode of the specified object. Execute will run the script.
required: false
default: "add"
choices: ["add", "delete", "execute", "set"]
version_added: "2.8"
script_name:
description:
- The name of the script.
required: True
script_type:
description:
- The type of script (CLI or TCL).
required: false
script_target:
description:
- The target of the script to be run.
required: false
script_description:
description:
- The description of the script.
required: false
script_content:
description:
- The script content that will be executed.
required: false
script_scope:
description:
- (datasource) The devices that the script will run on, can have both device member and device group member.
required: false
script_package:
description:
- (datasource) Policy package object to run the script against
required: false
'''
EXAMPLES = '''
- name: CREATE SCRIPT
fmgr_script:
adom: "root"
script_name: "TestScript"
script_type: "cli"
script_target: "remote_device"
script_description: "Create by Ansible"
script_content: "get system status"
- name: EXECUTE SCRIPT
fmgr_script:
adom: "root"
script_name: "TestScript"
mode: "execute"
script_scope: "FGT1,FGT2"
- name: DELETE SCRIPT
fmgr_script:
adom: "root"
script_name: "TestScript"
mode: "delete"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def set_script(fmgr, paramgram):
    """
    Create or update a script object on the FortiManager.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict

    :return: The response from the FortiManager
    :rtype: dict
    """
    # Map the module parameters onto the API payload field names.
    payload = {
        'name': paramgram["script_name"],
        'type': paramgram["script_type"],
        'desc': paramgram["script_description"],
        'content': paramgram["script_content"],
        'target': paramgram["script_target"],
    }
    url = '/dvmdb/adom/{adom}/script/'.format(adom=paramgram["adom"])
    return fmgr.process_request(url, payload, FMGRMethods.SET)
def delete_script(fmgr, paramgram):
    """
    Remove a script object from the FortiManager by name.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict

    :return: The response from the FortiManager
    :rtype: dict
    """
    url = '/dvmdb/adom/{adom}/script/{script_name}'.format(
        adom=paramgram["adom"], script_name=paramgram["script_name"]
    )
    payload = {'name': paramgram["script_name"]}
    return fmgr.process_request(url, payload, FMGRMethods.DELETE)
def execute_script(fmgr, paramgram):
    """
    Run an existing script against the devices named in script_scope.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    # script_scope is a comma-separated device list; strip spaces first,
    # then build one scope entry per device name.
    device_names = paramgram["script_scope"].replace(' ', '').split(',')
    scope_list = [{'name': dev, 'vdom': paramgram["vdom"]} for dev in device_names]
    datagram = {
        'adom': paramgram["adom"],
        'script': paramgram["script_name"],
        'package': paramgram["script_package"],
        'scope': scope_list,
    }
    url = '/dvmdb/adom/{adom}/script/execute'.format(adom=paramgram["adom"])
    return fmgr.process_request(url, datagram, FMGRMethods.EXEC)
def main():
    """Module entry point: parse arguments and dispatch the requested mode.

    Modes 'add'/'set' upsert the script, 'execute' runs it against the
    scope devices, 'delete' removes it.  Any exception raised while talking
    to the FortiManager is re-raised wrapped in FMGBaseException.
    """
    argument_spec = dict(
        adom=dict(required=False, type="str", default="root"),
        vdom=dict(required=False, type="str", default="root"),
        mode=dict(choices=["add", "execute", "set", "delete"], type="str", default="add"),
        script_name=dict(required=True, type="str"),
        script_type=dict(required=False, type="str"),
        script_target=dict(required=False, type="str"),
        script_description=dict(required=False, type="str"),
        script_content=dict(required=False, type="str"),
        script_scope=dict(required=False, type="str"),
        script_package=dict(required=False, type="str"),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    # Collapse the module parameters into the paramgram dict that the
    # helper functions consume (key order matches the original literal).
    paramgram = {
        key: module.params[key]
        for key in ("script_name", "script_type", "script_target",
                    "script_description", "script_content", "script_scope",
                    "script_package", "adom", "vdom", "mode")
    }
    module.paramgram = paramgram

    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = FMGRCommon()
    else:
        # No persistent connection socket: abort with the canonical message.
        module.fail_json(**FAIL_SOCKET_MSG)

    def _report(res):
        # Push results and derived facts through the handler's governor.
        fmgr.govern_response(module=module, results=res, msg="Operation Finished",
                             ansible_facts=fmgr.construct_ansible_facts(res, module.params, module.params))

    results = DEFAULT_RESULT_OBJ
    # Exactly one branch runs per invocation, so a single try with an
    # if/elif chain is equivalent to the three sequential try blocks.
    try:
        mode = paramgram["mode"]
        if mode in ("add", "set"):
            results = set_script(fmgr, paramgram)
            _report(results)
        elif mode == "execute":
            results = execute_script(fmgr, paramgram)
            _report(results)
        elif mode == "delete":
            results = delete_script(fmgr, paramgram)
            _report(results)
    except Exception as err:
        raise FMGBaseException(err)

    return module.exit_json(**results[1])
| gpl-3.0 |
sony-omni/android_kernel_caf_msm8x26 | Documentation/target/tcm_mod_builder.py | 2358 | 40707 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Mutable module-level state shared by the generation passes below.
tcm_dir = ""  # kernel tree root; scan path is tcm_dir + "include/target/..."
fabric_ops = []  # function-pointer lines harvested by tcm_mod_scan_fabric_ops()
fabric_mod_dir = ""  # output directory for the generated fabric module
fabric_mod_port = ""  # "lport" (FC) or "tport" (SAS/iSCSI); set by the *_include builders
fabric_mod_init_port = ""  # "nport" (FC) or "iport" (SAS/iSCSI); set alongside fabric_mod_port
def tcm_mod_err(msg):
    """Print *msg* and abort the generator with exit status 1.

    :param msg: diagnostic text to show before exiting
    """
    # print() with a single argument behaves identically under Python 2
    # (parenthesized expression) and Python 3 (function call).
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory if it does not exist.

    :param fabric_mod_dir_var: absolute path of the directory to create
    :return: 1 if the directory already existed (nothing to do), else None
    """
    # idiomatic truth test instead of the old `== True` comparison
    if os.path.isdir(fabric_mod_dir_var):
        return 1
    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    # os.mkdir() returns None on success and raises OSError on failure, so
    # the historical `if ret: tcm_mod_err(...)` branch could never trigger;
    # the exception itself is the real failure signal.
    os.mkdir(fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a Fibre Channel fabric module.

    Emits the nacl (initiator Nport), tpg and lport structure definitions,
    then records the FC naming convention in the module-level globals
    (fabric_mod_port = "lport", fabric_mod_init_port = "nport") consumed by
    the later generation passes.

    :param fabric_mod_dir_var: output directory for the generated header
    :param fabric_mod_name: new fabric module name, e.g. "tcm_foo"
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"
    # write() raises IOError/OSError on failure; its return value is None on
    # Python 2 but a character count on Python 3, so the historical
    # `if ret: tcm_mod_err(...)` check was dead on py2 and a spurious abort
    # on py3.  The `with` block also replaces the dead `if not p:` test,
    # since open() raises rather than returning a falsy object.
    with open(f, 'w') as p:
        p.write(buf)
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a SAS fabric module.

    Emits the nacl (initiator port), tpg and tport structure definitions,
    then records the SAS naming convention in the module-level globals
    (fabric_mod_port = "tport", fabric_mod_init_port = "iport").

    :param fabric_mod_dir_var: output directory for the generated header
    :param fabric_mod_name: new fabric module name, e.g. "tcm_foo"
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # write() raises on failure; its return value is not a success flag
    # (None on py2, char count on py3), so the old `if ret:` abort and the
    # dead `if not p:` check are both dropped in favour of `with`.
    with open(f, 'w') as p:
        p.write(buf)
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for an iSCSI fabric module.

    Emits the nacl (ASCII InitiatorName), tpg and tport structure
    definitions, then records the iSCSI naming convention in the
    module-level globals (fabric_mod_port = "tport",
    fabric_mod_init_port = "iport").

    :param fabric_mod_dir_var: output directory for the generated header
    :param fabric_mod_name: new fabric module name, e.g. "tcm_foo"
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # write() raises on failure; its return value is not a success flag
    # (None on py2, char count on py3), so the old `if ret:` abort and the
    # dead `if not p:` check are both dropped in favour of `with`.
    with open(f, 'w') as p:
        p.write(buf)
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the per-protocol <fabric_mod_name>_base.h generator.

    :param proto_ident: one of "FC", "SAS", "iSCSI"; anything else is fatal
    :param fabric_mod_dir_val: output directory for the generated header
    :param fabric_mod_name: new fabric module name
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        # py2/py3-compatible print form; unknown protocols abort the run.
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
# Generate <fabric_mod_name>_configfs.c: the configfs glue for the new fabric
# module — make/drop callbacks for node ACLs, TPGs and WWN ports, the
# target_core_fabric_ops dispatch table, and module init/exit entry points.
# NOTE(review): this file's indentation was lost in extraction, so the bodies
# of the `if proto_ident == ...:` branches below can no longer be told apart
# from the unconditional lines; code is preserved byte-for-byte.
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
# Header includes for the generated translation unit.
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
# --- <name>_make_nodeacl() / <name>_drop_nodeacl() ---
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
# --- <name>_make_tpg() / <name>_drop_tpg() ---
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
# --- WWN port (lport/tport) make/drop callbacks ---
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
# --- version attribute exposed under the fabric's configfs wwn directory ---
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
# --- target_core_fabric_ops dispatch table wiring every callback ---
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
# --- configfs registration/deregistration and module init/exit ---
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
# Write the generated source; `if ret:` only fires when write() returns a
# truthy value — never on Python 2 (write() returns None).
# NOTE(review): on Python 3 write() returns a character count, which would
# trip this check; this script targets Python 2 (print statements above).
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
# Harvest the function-pointer member lines of struct target_core_fabric_ops
# from the kernel's include/target/target_core_fabric.h into the global
# fabric_ops list (each entry is a right-stripped source line containing
# a "(*" function-pointer declaration).
# NOTE(review): the original indentation was lost in extraction, so the exact
# nesting of the scanner loop below (the duplicated readline/search/append
# runs) cannot be reconstructed with confidence; code preserved byte-for-byte.
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
# Skip ahead until the struct target_core_fabric_ops definition is found.
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Generate the kbuild Makefile for a new fabric-module skeleton.

    Writes <fabric_mod_dir_var>/Makefile listing the module's object files
    and the obj-$(CONFIG_...) hook.  The path written and the generated
    content are unchanged from the original.
    """
    f = fabric_mod_dir_var + "/Makefile"
    print("Writing file: " + f)
    buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += "                           " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
    # open()/write() raise on failure, so the original's dead checks are
    # dropped: 'if not p' can never be true (open raises instead of
    # returning a falsy object), and 'if ret' never fired under Python 2
    # (file.write returns None) while it would misfire under Python 3
    # (write returns the character count).
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Generate the Kconfig entry for a new fabric-module skeleton.

    Writes <fabric_mod_dir_var>/Kconfig declaring a tristate config symbol
    named after the module (upper-cased).
    """
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)
    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += "\ttristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += "\tdepends on TARGET_CORE && CONFIGFS_FS\n"
    buf += "\tdefault n\n"
    buf += "\t---help---\n"
    buf += "\tSay Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
    # open()/write() raise on failure; the original's checks on the file
    # object and on write()'s return value were dead code under Python 2
    # and would misfire under Python 3 (write() returns a count there).
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's obj-$(CONFIG_...) line to drivers/target/Makefile.

    Uses a with-statement so the file is closed even if the write raises.
    """
    buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"
    with open(kbuild, 'a') as f:
        f.write(buf)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a 'source' line for the new module to drivers/target/Kconfig.

    Uses a with-statement so the file is closed even if the write raises.
    """
    buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"
    with open(kconfig, 'a') as f:
        f.write(buf)
    return
def main(modname, proto_ident):
    """Drive generation of a new TCM fabric-module skeleton.

    Creates drivers/target/<modname> under the kernel tree two levels up
    from the current directory, emits the skeleton sources, Makefile and
    Kconfig, and interactively offers to hook the module into the
    drivers/target build files.  proto_ident must be "FC", "SAS" or
    "iSCSI".  Exits the process on unsupported protocol or if the module
    directory already exists.
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"

    tcm_dir = os.getcwd()
    tcm_dir += "/../../"
    print("tcm_dir: " + tcm_dir)
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print("Set fabric_mod_name: " + fabric_mod_name)
    print("Set fabric_mod_dir: " + fabric_mod_dir)
    print("Using proto_ident: " + proto_ident)

    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print("tcm_mod_create_module_subdir() failed because module already exists!")
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    # Bug fix: the original prompts concatenated the module name directly
    # against "to", producing e.g. "add demoto drivers/target/Makefile".
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line entry point.  NOTE: this option parsing runs at import
# time, not only under __main__ -- importing the module with missing
# options will call exit(-1).
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# Both options are mandatory; optparse has no 'required' flag, so check
# them by hand and bail out with the usage text if either is missing.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
donald-pinckney/EM-Simulator | EM Sim/EM Sim/py_lib/lib2to3/fixes/fix_tuple_params.py | 324 | 5577 | """Fixer for function definitions with tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error in Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
    """Return True if *stmt* is a pytree Node whose first child is a
    STRING token (i.e. the statement is a docstring)."""
    if not isinstance(stmt, pytree.Node):
        return False
    return stmt.children[0].type == token.STRING
class FixTupleParams(fixer_base.BaseFix):
    """2to3 fixer that removes tuple parameters from 'def' and 'lambda'.

    ``def f(((a, b), c), d)`` becomes ``def f(x, d)`` with an unpacking
    assignment ``((a, b), c) = x`` inserted at the top of the body;
    lambdas are rewritten to index into a single tuple parameter.
    """
    run_order = 4 #use a lower order since lambda is part of other
                  #patterns
    BM_compatible = True

    PATTERN = """
              funcdef< 'def' any parameters< '(' args=any ')' >
                       ['->' any] ':' suite=any+ >
              |
              lambda=
              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
                       ':' body=any
              >
              """

    def transform(self, node, results):
        """Rewrite a matched funcdef (or dispatch lambdas to
        transform_lambda), inserting unpacking assignments into the suite."""
        if "lambda" in results:
            return self.transform_lambda(node, results)

        new_lines = []
        suite = results["suite"]
        args = results["args"]
        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
        # TODO(cwinter): suite-cleanup
        if suite[0].children[1].type == token.INDENT:
            start = 2
            indent = suite[0].children[1].value
            end = Newline()
        else:
            # One-line suite ("def f(...): stmt"): join inserted
            # statements with "; " instead of newlines.
            start = 0
            indent = u"; "
            end = pytree.Leaf(token.INDENT, u"")

        # We need access to self for new_name(), and making this a method
        # doesn't feel right. Closing over self and new_lines makes the
        # code below cleaner.
        def handle_tuple(tuple_arg, add_prefix=False):
            # Replace the tuple parameter with a fresh name and queue an
            # unpacking assignment for insertion into the function body.
            n = Name(self.new_name())
            arg = tuple_arg.clone()
            arg.prefix = u""
            stmt = Assign(arg, n.clone())
            if add_prefix:
                n.prefix = u" "
            tuple_arg.replace(n)
            new_lines.append(pytree.Node(syms.simple_stmt,
                                         [stmt, end.clone()]))

        if args.type == syms.tfpdef:
            handle_tuple(args)
        elif args.type == syms.typedargslist:
            for i, arg in enumerate(args.children):
                if arg.type == syms.tfpdef:
                    # Without add_prefix, the emitted code is correct,
                    # just ugly.
                    handle_tuple(arg, add_prefix=(i > 0))

        if not new_lines:
            return

        # This isn't strictly necessary, but it plays nicely with other fixers.
        # TODO(cwinter) get rid of this when children becomes a smart list
        for line in new_lines:
            line.parent = suite[0]

        # TODO(cwinter) suite-cleanup
        after = start
        if start == 0:
            new_lines[0].prefix = u" "
        elif is_docstring(suite[0].children[start]):
            # Keep a docstring as the first statement; insert after it.
            new_lines[0].prefix = indent
            after = start + 1

        for line in new_lines:
            line.parent = suite[0]
        suite[0].children[after:after] = new_lines
        for i in range(after+1, after+len(new_lines)+1):
            suite[0].children[i].prefix = indent
        suite[0].changed()

    def transform_lambda(self, node, results):
        """Rewrite ``lambda (x, y): ...`` to use a single tuple parameter,
        replacing body references with subscript expressions."""
        args = results["args"]
        body = results["body"]
        inner = simplify_args(results["inner"])

        # Replace lambda ((((x)))): x  with lambda x: x
        if inner.type == token.NAME:
            inner = inner.clone()
            inner.prefix = u" "
            args.replace(inner)
            return

        params = find_params(args)
        to_index = map_to_index(params)
        tup_name = self.new_name(tuple_name(params))

        new_param = Name(tup_name, prefix=u" ")
        args.replace(new_param.clone())
        for n in body.post_order():
            if n.type == token.NAME and n.value in to_index:
                # e.g. 'x' -> 't[0]' (or nested, 't[1][0]').
                subscripts = [c.clone() for c in to_index[n.value]]
                new = pytree.Node(syms.power,
                                  [new_param.clone()] + subscripts)
                new.prefix = n.prefix
                n.replace(new)
### Helper functions for transform_lambda()
def simplify_args(node):
    """Strip redundant parenthesis wrappers from a lambda parameter node.

    Returns the innermost node: a NAME or vfplist is returned as-is; a
    vfpdef chain ``( ( x ) )`` is descended until the wrapped node is
    reached.  Any other node type is a parse we do not expect.
    """
    if node.type in (syms.vfplist, token.NAME):
        return node
    elif node.type == syms.vfpdef:
        # These look like vfpdef< '(' x ')' > where x is NAME
        # or another vfpdef instance (leading to recursion).
        while node.type == syms.vfpdef:
            node = node.children[1]
        return node
    raise RuntimeError("Received unexpected node %s" % node)
def find_params(node):
    """Recursively collect parameter names from a (possibly nested) lambda
    parameter tree: a NAME yields its string, a vfpdef descends into its
    wrapped child, anything else yields a nested list of names."""
    if node.type == token.NAME:
        return node.value
    if node.type == syms.vfpdef:
        return find_params(node.children[1])
    return [find_params(child) for child in node.children
            if child.type != token.COMMA]
def map_to_index(param_list, prefix=None, d=None):
    """Map each parameter name to the subscript chain that reaches it.

    For ``['a', ['b', 'c']]`` produces ``{'a': [Subscript(0)],
    'b': [Subscript(1), Subscript(0)], ...}``.  *prefix* accumulates the
    subscripts of enclosing tuples during recursion; *d* is the shared
    result dict.

    Fix: the original used a mutable default argument (``prefix=[]``);
    although it was never mutated here, ``None`` with an explicit fallback
    is the safe idiom and keeps the call signature backward compatible.
    """
    if prefix is None:
        prefix = []
    if d is None:
        d = {}
    for i, obj in enumerate(param_list):
        trailer = [Subscript(Number(unicode(i)))]
        if isinstance(obj, list):
            map_to_index(obj, trailer, d=d)
        else:
            d[obj] = prefix + trailer
    return d
def tuple_name(param_list):
    """Flatten a nested parameter-name list into one underscore-joined
    identifier, e.g. ['a', ['b', 'c']] -> 'a_b_c'."""
    return u"_".join(
        tuple_name(entry) if isinstance(entry, list) else entry
        for entry in param_list)
| apache-2.0 |
bzhpwr/MediExports | project_env/lib/python2.6/site-packages/werkzeug/_compat.py | 148 | 6190 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
# True when running under CPython 2; selects which set of compatibility
# shims below is defined.
PY2 = sys.version_info[0] == 2

# Identity function reused as a no-op shim on Python 3.
_identity = lambda x: x


if PY2:
    # ---- Python 2 branch ----------------------------------------------
    unichr = unichr
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)

    int_to_byte = chr

    # Dict iteration helpers delegate to the py2 iter* methods.
    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)

    iter_bytes = lambda x: iter(x)

    # The three-argument raise is a syntax error on py3, so it is hidden
    # inside exec() to keep this module importable there.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    def fix_tuple_repr(obj):
        # Give a namedtuple-like class a field=value repr.
        def __repr__(self):
            cls = self.__class__
            return '%s(%s)' % (cls.__name__, ', '.join(
                '%s=%r' % (field, self[index])
                for index, field in enumerate(cls._fields)
            ))
        obj.__repr__ = __repr__
        return obj

    def implements_iterator(cls):
        # py2 iterators use next(); alias it from the py3-style __next__.
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        # On py2 __str__ must return bytes; derive it from __unicode__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    def native_string_result(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs).encode('utf-8')
        return functools.update_wrapper(wrapper, func)

    def implements_bool(cls):
        # py2 truthiness hook is __nonzero__.
        cls.__nonzero__ = cls.__bool__
        del cls.__bool__
        return cls

    from itertools import imap, izip, ifilter
    range_type = xrange

    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO
    NativeStringIO = BytesIO

    def make_literal_wrapper(reference):
        return lambda x: x

    def normalize_string_tuple(tup):
        """Normalizes a string tuple to a common type. Following Python 2
        rules, upgrades to unicode are implicit.
        """
        if any(isinstance(x, text_type) for x in tup):
            return tuple(to_unicode(x) for x in tup)
        return tup

    def try_coerce_native(s):
        """Try to coerce a unicode string to native if possible. Otherwise,
        leave it as unicode.
        """
        try:
            return to_native(s)
        except UnicodeError:
            return s

    wsgi_get_bytes = _identity

    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        return s.decode(charset, errors)

    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        if isinstance(s, bytes):
            return s
        return s.encode(charset, errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)

else:
    # ---- Python 3 branch ----------------------------------------------
    unichr = chr
    text_type = str
    string_types = (str, )
    integer_types = (int, )

    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))

    int_to_byte = operator.methodcaller('to_bytes', 1, 'big')

    def iter_bytes(b):
        # Iterating py3 bytes yields ints; convert each back to a 1-byte
        # bytes object to match the py2 behaviour.
        return map(int_to_byte, b)

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    # No-ops on py3: the py3-native dunder protocol is already correct.
    fix_tuple_repr = _identity
    implements_iterator = _identity
    implements_to_string = _identity
    implements_bool = _identity
    native_string_result = _identity

    imap = map
    izip = zip
    ifilter = filter
    range_type = range

    from io import StringIO, BytesIO
    NativeStringIO = StringIO

    def make_literal_wrapper(reference):
        if isinstance(reference, text_type):
            return lambda x: x
        return lambda x: x.encode('latin1')

    def normalize_string_tuple(tup):
        """Ensures that all types in the tuple are either strings
        or bytes.
        """
        tupiter = iter(tup)
        is_text = isinstance(next(tupiter, None), text_type)
        for arg in tupiter:
            if isinstance(arg, text_type) != is_text:
                raise TypeError('Cannot mix str and bytes arguments (got %s)'
                                % repr(tup))
        return tup

    try_coerce_native = _identity

    def wsgi_get_bytes(s):
        return s.encode('latin1')

    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        # WSGI strings are latin1-encoded bytes in str form (PEP 3333).
        return s.encode('latin1').decode(charset, errors)

    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        if isinstance(s, bytes):
            return s.decode('latin1', errors)
        return s.encode(charset).decode('latin1', errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, memoryview)):
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)


def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
               allow_none_charset=False):
    # Shared by both branches: coerce x to the text type, leaving None
    # (and, optionally, raw bytes when no charset is given) untouched.
    if x is None:
        return None
    if not isinstance(x, bytes):
        return text_type(x)
    if charset is None and allow_none_charset:
        return x
    return x.decode(charset, errors)
| gpl-2.0 |
rickerc/neutron_audit | neutron/agent/linux/ovsdb_monitor.py | 1 | 4106 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from neutron.agent.linux import async_process
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class OvsdbMonitor(async_process.AsyncProcess):
    """Manages an invocation of 'ovsdb-client monitor'."""

    def __init__(self, table_name, columns=None, format=None,
                 root_helper=None, respawn_interval=None):
        # Build the monitor command line: 'columns' narrows output to a
        # comma-separated subset of the table's columns, 'format' selects
        # ovsdb-client's output format (e.g. "json").
        cmd = ['ovsdb-client', 'monitor', table_name]
        if columns:
            cmd.append(','.join(columns))
        if format:
            cmd.append('--format=%s' % format)
        super(OvsdbMonitor, self).__init__(cmd,
                                           root_helper=root_helper,
                                           respawn_interval=respawn_interval)

    def _read_stdout(self):
        # Read one line from the child; route root-helper error output
        # (which ends up on stdout) to the stderr queue instead.
        data = self._process.stdout.readline()
        if not data:
            return
        #TODO(marun) The default root helper outputs exit errors to
        # stdout due to bug #1219530. This check can be moved to
        # _read_stderr once the error is correctly output to stderr.
        if self.root_helper and self.root_helper in data:
            self._stderr_lines.put(data)
            LOG.error(_('Error received from ovsdb monitor: %s') % data)
        else:
            self._stdout_lines.put(data)
            LOG.debug(_('Output received from ovsdb monitor: %s') % data)
        return data

    def _read_stderr(self):
        # Log stderr output but deliberately return None (see comment).
        data = super(OvsdbMonitor, self)._read_stderr()
        if data:
            LOG.error(_('Error received from ovsdb monitor: %s') % data)
            # Do not return value to ensure that stderr output will
            # stop the monitor.
class SimpleInterfaceMonitor(OvsdbMonitor):
    """Monitors the Interface table of the local host's ovsdb for changes.

    The has_updates() method indicates whether changes to the ovsdb
    Interface table have been detected since the monitor started or
    since the previous access.
    """

    def __init__(self, root_helper=None, respawn_interval=None):
        super(SimpleInterfaceMonitor, self).__init__(
            'Interface',
            columns=['name'],
            format='json',
            root_helper=root_helper,
            respawn_interval=respawn_interval,
        )
        # Becomes True once the first line of monitor output arrives.
        self.data_received = False

    @property
    def is_active(self):
        # Active = output seen and the underlying process has not been
        # signalled to die.
        return (self.data_received and
                self._kill_event and
                not self._kill_event.ready())

    @property
    def has_updates(self):
        """Indicate whether the ovsdb Interface table has been updated.

        True will be returned if the monitor process is not active.
        This 'failing open' minimizes the risk of falsely indicating
        the absense of updates at the expense of potential false
        positives.
        """
        # Draining iter_stdout() here also clears the pending updates.
        return bool(list(self.iter_stdout())) or not self.is_active

    def start(self, block=False, timeout=5):
        super(SimpleInterfaceMonitor, self).start()
        if block:
            # NOTE(review): the Timeout object is created but never used
            # as a context manager or cancelled -- presumably intended to
            # bound the wait below; confirm against eventlet semantics.
            eventlet.timeout.Timeout(timeout)
            while not self.is_active:
                eventlet.sleep()

    def _kill(self, *args, **kwargs):
        # Reset the activity flag so a respawned process must prove
        # itself again before is_active reports True.
        self.data_received = False
        super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs)

    def _read_stdout(self):
        data = super(SimpleInterfaceMonitor, self)._read_stdout()
        if data and not self.data_received:
            self.data_received = True
        return data
| apache-2.0 |
jbteixeir/Openflow-DC-Framework | pox/lib/packet/igmp.py | 27 | 3726 | # Copyright 2012 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# IGMP v1/v2
#
# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Ver * | Type | MRT/Unused ** | Checksum |
# +-------+-------+---------------+-------------------------------+
# | Group Address |
# +-------------------------------+-------------------------------+
#
# * In v2, there is no Version field, and Type is the whole 8 bits
# ** Max Response Time in v2 only
#
#======================================================================
#TODO: Support for IGMP v3
import struct
from packet_utils import *
from packet_base import packet_base
from pox.lib.addresses import *
# IGMP message type codes (v1/v2).
MEMBERSHIP_QUERY = 0x11
MEMBERSHIP_REPORT = 0x12
MEMBERSHIP_REPORT_V2 = 0x16
LEAVE_GROUP_V2 = 0x17

# IGMP multicast address
IGMP_ADDRESS = IPAddr("224.0.0.22")

# IGMP IP protocol
IGMP_PROTOCOL = 2
class igmp (packet_base):
  """
  IGMP Message

  Parses and serializes IGMP v1/v2 headers: version/type byte, max
  response time, 16-bit checksum and the 32-bit group address, plus any
  trailing bytes in ``extra``.
  """

  MIN_LEN = 8

  # Re-export the module constants on the class for convenience.
  IGMP_ADDRESS = IGMP_ADDRESS
  IGMP_PROTOCOL = IGMP_PROTOCOL

  MEMBERSHIP_QUERY = MEMBERSHIP_QUERY
  MEMBERSHIP_REPORT = MEMBERSHIP_REPORT
  MEMBERSHIP_REPORT_V2 = MEMBERSHIP_REPORT_V2
  LEAVE_GROUP_V2 = LEAVE_GROUP_V2

  def __init__(self, raw=None, prev=None, **kw):
    packet_base.__init__(self)
    self.prev = prev

    self.ver_and_type = 0
    self.max_response_time = 0
    self.csum = 0
    self.address = None
    self.extra = b''

    if raw is not None:
      self.parse(raw)

    self._init(kw)

  def hdr (self, payload):
    """Serialize the message.  The checksum is computed over the header
    (with the checksum field zeroed) plus ``extra``, then packed in."""
    s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time,
                    0, self.address.toSigned(networkOrder=False))
    s += self.extra
    self.csum = checksum(s)
    s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time,
                    self.csum, self.address.toSigned(networkOrder=False))
    s += self.extra
    return s

  def parse (self, raw):
    """Parse raw bytes; sets ``parsed`` only if length and checksum are
    both valid."""
    assert isinstance(raw, bytes)
    self.raw = raw
    dlen = len(raw)
    if dlen < self.MIN_LEN:
      self.msg('packet data too short to parse')
      return None

    self.ver_and_type, self.max_response_time, self.csum, ip = \
        struct.unpack("!BBHi", raw[:self.MIN_LEN])
    self.extra = raw[self.MIN_LEN:]

    self.address = IPAddr(ip, networkOrder = False)

    # Recompute the checksum with the checksum field zeroed and compare.
    s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time,
                    0, self.address.toSigned(networkOrder=False))
    s += self.extra
    csum = checksum(s)
    if csum != self.csum:
      # Bug fix: the message previously read "hecksums".
      self.err("IGMP checksums don't match")
    else:
      self.parsed = True

  def __str__ (self):
    s = "[IGMP "
    s += "vt:%02x %s" % (self.ver_and_type, self.address)
    return s + "]"
| gpl-3.0 |
bousmalis/models | transformer/cluttered_mnist.py | 19 | 6535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow as tf
from spatial_transformer import transformer
import numpy as np
from tf_utils import weight_variable, bias_variable, dense_to_one_hot
# NOTE: this script uses the TensorFlow 1.x graph/placeholder/Session API
# and requires the cluttered-MNIST .npz data file to exist locally.
# %% Load data
mnist_cluttered = np.load('./data/mnist_sequence1_sample_5distortions5x5.npz')

X_train = mnist_cluttered['X_train']
y_train = mnist_cluttered['y_train']
X_valid = mnist_cluttered['X_valid']
y_valid = mnist_cluttered['y_valid']
X_test = mnist_cluttered['X_test']
y_test = mnist_cluttered['y_test']

# % turn from dense to one hot representation
Y_train = dense_to_one_hot(y_train, n_classes=10)
Y_valid = dense_to_one_hot(y_valid, n_classes=10)
Y_test = dense_to_one_hot(y_test, n_classes=10)

# %% Graph representation of our network

# %% Placeholders for 40x40 resolution
x = tf.placeholder(tf.float32, [None, 1600])
y = tf.placeholder(tf.float32, [None, 10])

# %% Since x is currently [batch, height*width], we need to reshape to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 40, 40, 1])

# %% We'll setup the two-layer localisation network to figure out the
# %% parameters for an affine transformation of the input
# %% Create variables for fully connected layer
W_fc_loc1 = weight_variable([1600, 20])
b_fc_loc1 = bias_variable([20])

W_fc_loc2 = weight_variable([20, 6])
# Use identity transformation as starting point
# (6 values = a 2x3 affine matrix, flattened)
initial = np.array([[1., 0, 0], [0, 1., 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')

# %% Define the two layer localisation network
h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
keep_prob = tf.placeholder(tf.float32)
h_fc_loc1_drop = tf.nn.dropout(h_fc_loc1, keep_prob)
# %% Second layer
h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1_drop, W_fc_loc2) + b_fc_loc2)

# %% We'll create a spatial transformer module to identify discriminative
# %% patches
out_size = (40, 40)
h_trans = transformer(x_tensor, h_fc_loc2, out_size)

# %% We'll setup the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 3
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])

# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])

# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels
# instead of pooling, we use strides of 2 and more layers
# with smaller filters.

h_conv1 = tf.nn.relu(
    tf.nn.conv2d(input=h_trans,
                 filter=W_conv1,
                 strides=[1, 2, 2, 1],
                 padding='SAME') +
    b_conv1)

# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
    tf.nn.conv2d(input=h_conv1,
                 filter=W_conv2,
                 strides=[1, 2, 2, 1],
                 padding='SAME') +
    b_conv2)

# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 10 * 10 * n_filters_2])

# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([10 * 10 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)

h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# %% Define loss/eval/training functions
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y))
opt = tf.train.AdamOptimizer()
optimizer = opt.minimize(cross_entropy)
# NOTE(review): 'grads' is computed but not used anywhere below --
# presumably left over from debugging the localisation bias gradients.
grads = opt.compute_gradients(cross_entropy, [b_fc_loc2])

# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

# %% We now create a new session to actually perform the initialization the
# variables:
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# %% We'll now train in minibatches and report accuracy, loss:
iter_per_epoch = 100
n_epochs = 500
train_size = 10000

# Evenly spaced batch boundaries over the 10000 training examples.
indices = np.linspace(0, 10000 - 1, iter_per_epoch)
indices = indices.astype('int')

for epoch_i in range(n_epochs):
    for iter_i in range(iter_per_epoch - 1):
        batch_xs = X_train[indices[iter_i]:indices[iter_i+1]]
        batch_ys = Y_train[indices[iter_i]:indices[iter_i+1]]

        if iter_i % 10 == 0:
            loss = sess.run(cross_entropy,
                            feed_dict={
                                x: batch_xs,
                                y: batch_ys,
                                keep_prob: 1.0
                            })
            print('Iteration: ' + str(iter_i) + ' Loss: ' + str(loss))

        # keep_prob 0.8 enables dropout during the training step only.
        sess.run(optimizer, feed_dict={
            x: batch_xs, y: batch_ys, keep_prob: 0.8})

    print('Accuracy (%d): ' % epoch_i + str(sess.run(accuracy,
                                                     feed_dict={
                                                         x: X_valid,
                                                         y: Y_valid,
                                                         keep_prob: 1.0
                                                     })))

    # theta = sess.run(h_fc_loc2, feed_dict={
    #        x: batch_xs, keep_prob: 1.0})
    # print(theta[0])
| apache-2.0 |
Robobench/rapman-subuser | logic/subuserlib/classes/runtime.py | 1 | 7660 | #!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
Runtime environements which are prepared for subusers to run in.
"""
#external imports
import sys,collections,os
#internal imports
import subuserlib.classes.userOwnedObject
def getRecursiveDirectoryContents(directory):
  """ Return the paths of all files below ``directory``, walking it
  recursively (directories themselves are not included). """
  return [os.path.join(dirPath, fileName)
          for (dirPath, _, fileNames) in os.walk(directory)
          for fileName in fileNames]
class Runtime(subuserlib.classes.userOwnedObject.UserOwnedObject):
  """
  A runtime environment prepared for a subuser to run in.

  Bundles the subuser, the id of its run-ready docker image and the
  environment of the calling process, and translates the subuser's
  permissions into ``docker run`` command line arguments.
  """
  __runReadyImageId = None
  __subuser = None
  __environment = None
  def __init__(self,user,subuser,runReadyImageId,environment):
    subuserlib.classes.userOwnedObject.UserOwnedObject.__init__(self,user)
    self.__subuser = subuser
    self.__runReadyImageId = runReadyImageId
    self.__environment = environment
  def getSubuser(self):
    """ Return the subuser this runtime was prepared for. """
    return self.__subuser
  def getRunreadyImageId(self):
    """ Return the id of the docker image to be run. """
    return self.__runReadyImageId
  def getEnvironment(self):
    """ Return the environment variable mapping of the calling process. """
    return self.__environment
  def getSerialDevices(self):
    """ Return the names of the host's serial devices (ttyS*/ttyUSB*/ttyACM*). """
    return [device for device in os.listdir("/dev/") if device.startswith("ttyS") or device.startswith("ttyUSB") or device.startswith("ttyACM")]
  def getBasicFlags(self):
    """ Flags passed to ``docker run`` for every subuser. """
    return [
      "-i",
      "-t",
      "--rm",
      "--entrypoint=/bin/bash"]
  def passOnEnvVar(self,envVar):
    """
    Generate the arguments required to pass on a given ENV var to the container from the host.
    Returns an empty list when the variable is not set on the host.
    """
    try:
      return ["-e",envVar+"="+self.getEnvironment()[envVar]]
    except KeyError:
      return []
  def getSoundArgs(self):
    """ Build the flags granting access to the host's sound devices. """
    soundArgs = []
    if os.path.exists("/dev/snd"):
      soundArgs += ["--volume=/dev/snd:/dev/snd"]
      soundArgs += ["--device=/dev/snd/"+device for device in os.listdir("/dev/snd") if not device == "by-id" and not device == "by-path"]
    if os.path.exists("/dev/dsp"):
      soundArgs += ["--volume=/dev/dsp:/dev/dsp"]
      soundArgs += ["--device=/dev/dsp/"+device for device in os.listdir("/dev/dsp")]
    return soundArgs
  def getGraphicsArgs(self):
    """ Build the flags granting access to the host's graphics devices (DRI, NVidia, ATI). """
    graphicsArgs = []
    graphicsArgs += ["--device=/dev/dri/"+device for device in os.listdir("/dev/dri")]
    # Get NVidia devices
    nvidiaArgs = ["--device=/dev/" + device for device in os.listdir("/dev") if "nvidia" in device]
    atiArgs = []
    if os.access("/dev/ati", os.F_OK | os.R_OK):
      atiArgs = ["--device=/dev/ati/" + device for device in os.listdir("/dev/ati")]
    # Debug output.  Single-argument print() calls (instead of the former
    # Python 2 print statements) keep this module Python 3 compatible, as
    # the module header promises, while printing the same text on Python 2.
    print(nvidiaArgs)
    print(atiArgs)
    graphicsArgs += ["-e","QT_GRAPHICSSYSTEM=native"] + atiArgs + nvidiaArgs
    return graphicsArgs
  def getPortArgs(self, ports):
    """ Get the port mapping permissions. """
    portArgs = []
    for port in ports:
      portArgs += ["--publish=%s"%(port)]
    return portArgs
  def getHomeArgs(self, home):
    """ Build the flags mounting the subuser's home directory, when requested. """
    homeArgs = []
    if home:
      homeArgs = ["-v="+self.getSubuser().getHomeDirOnHost()+":"+self.getSubuser().getDockersideHome()+":rw","-e","HOME="+self.getSubuser().getDockersideHome() ]
    return homeArgs
  def setupX11Access(self, xauthFilename):
    """
    Make sure the xauth file exists (Pythonic "touch") and merge the
    current display's cookies into it so the container can talk to X11.
    """
    import subprocess
    with open(xauthFilename, 'a'):
      os.utime(xauthFilename, None)
    display=os.environ['DISPLAY']
    args="xauth nlist %s | sed -e 's/^..../ffff/' | xauth -f %s nmerge -"%(display, xauthFilename)
    p = subprocess.Popen(args , shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print(p.stdout.read())
  def getX11Args(self):
    """ Build the flags sharing the X11 socket and the xauth cookies with the container. """
    socketFilename="/tmp/.X11-unix"
    xauthFilename="/tmp/.docker.xauth"
    self.setupX11Access(xauthFilename)
    args = ["-v=%s:%s"%(socketFilename, socketFilename), "-v=%s:%s"%(xauthFilename, xauthFilename)]
    args += ["-e","DISPLAY=unix"+os.environ['DISPLAY'], "-e", "XAUTHORITY=%s"%(xauthFilename)]
    return args
  def getPermissionFlagDict(self):
    """
    This is a dictionary mapping permissions to functions which when given the permission's values return docker run flags.
    """
    # NOTE(review): the "access-host-docker" entry ignores its permission
    # value and always emits the flags -- verify this is intended.
    return collections.OrderedDict([
      # Conservative permissions
      ("stateful-home", lambda p : self.getHomeArgs(p)),
      ("inherit-locale", lambda p : self.passOnEnvVar("LANG")+self.passOnEnvVar("LANGUAGE") if p else []),
      ("inherit-timezone", lambda p : self.passOnEnvVar("TZ")+["-v=/etc/localtime:/etc/localtime:r"] if p else []),
      # Moderate permissions
      ("user-dirs", lambda userDirs : ["-v="+os.path.join(self.getSubuser().getUser().homeDir,userDir)+":"+os.path.join("/userdirs/",userDir)+":rw" for userDir in userDirs]),
      ("sound-card", lambda p: self.getSoundArgs() if p else []),
      ("webcam", lambda p: ["--device=/dev/"+device for device in os.listdir("/dev/") if device.startswith("video")] if p else []),
      ("access-working-directory", lambda p: ["-v="+os.getcwd()+":/pwd:rw","--workdir=/pwd"] if p else ["--workdir="+self.getSubuser().getDockersideHome()]),
      ("allow-network-access", lambda p: ["--net=bridge","--dns=8.8.8.8"] if p else ["--net=none"]),
      ("ports", lambda ports: self.getPortArgs(ports) if ports else []),
      # Liberal permissions
      ("x11", lambda p: self.getX11Args() if p else []),
      ("graphics-card", lambda p: self.getGraphicsArgs() if p else []),
      ("access-host-docker", lambda p: ["--volume=/var/run/docker.sock:/subuser/host.docker.sock -e DOCKER_HOST=/subuser/host.docker.sock"]),
      ("serial-devices", lambda sd: ["--device=/dev/"+device for device in self.getSerialDevices()] if sd else []),
      ("system-dbus", lambda dbus: ["--volume=/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket"] if dbus else []),
      ("as-root", lambda root: ["--user=0"] if root else ["--user="+str(os.getuid())]),
      # Anarchistic permissions
      ("privileged", lambda p: ["--privileged"] if p else [])])
  def getCommand(self,args):
    """
    Returns the command required to run the subuser as a list of string arguments.
    """
    flags = self.getBasicFlags()
    permissionFlagDict = self.getPermissionFlagDict()
    permissions = self.getSubuser().getPermissions()
    for permission, flagGenerator in permissionFlagDict.items():
      flags.extend(flagGenerator(permissions[permission]))
    executableargs = [self.getSubuser().getPermissions()["executable"]]
    if len(args) == 1 and args[0] == '--enter':
      # Drop the configured executable so the user lands in the
      # container's shell (the entrypoint is /bin/bash).
      executableargs = []
      print("Entering docker container")
      args = []
    return ["run"]+flags+[self.getRunreadyImageId()]+executableargs+args
  def getPrettyCommand(self,args):
    """
    Get a command for pretty printing for use with dry-run.
    """
    command = self.getCommand(args)
    return "docker '"+"' '".join(command)+"'"
  def run(self,args):
    """
    Run the subuser with the given arguments, preparing the stateful-home
    symlinks first when required.
    """
    if not self.getSubuser().getPermissions()["executable"]:
      sys.exit("Cannot run subuser, no executable configured in permissions.json file.")
    def setupSymlinks():
      # Link ~/Userdirs inside the subuser's home to the container-side
      # /userdirs mount point.
      symlinkPath = os.path.join(self.getSubuser().getHomeDirOnHost(),"Userdirs")
      destinationPath = "/userdirs"
      if not os.path.exists(symlinkPath):
        try:
          os.makedirs(self.getSubuser().getHomeDirOnHost())
        except OSError:
          pass
        try:
          os.symlink(destinationPath,symlinkPath) #Arg, why are source and destination switched?
          #os.symlink(where does the symlink point to, where is the symlink)
          #I guess it's to be like cp...
        except OSError:
          pass
    if self.getSubuser().getPermissions()["stateful-home"]:
      setupSymlinks()
    self.getUser().getDockerDaemon().execute(self.getCommand(args))
| lgpl-3.0 |
zer0yu/ZEROScan | thirdparty/requests/packages/urllib3/util/wait.py | 8 | 1491 | from .selectors import (
HAS_SELECT,
DefaultSelector,
EVENT_READ,
EVENT_WRITE
)
def _wait_for_io_events(socks, events, timeout=None):
    """ Block until IO events become available on the given sockets
    (a list, a single socket-like object, or any iterable of sockets)
    and return the list of sockets that are ready right now. """
    if not HAS_SELECT:
        raise ValueError('Platform does not have a selector')
    if not isinstance(socks, list):
        # Normalize the argument: a lone socket becomes a one-element
        # list, any other iterable is materialized.
        socks = [socks] if hasattr(socks, "fileno") else list(socks)
    with DefaultSelector() as selector:
        for sock in socks:
            selector.register(sock, events)
        ready = []
        for key, mask in selector.select(timeout):
            if mask & events:
                ready.append(key.fileobj)
        return ready
def wait_for_read(socks, timeout=None):
    """ Block up to ``timeout`` seconds until at least one of the given
    sockets (or the single socket passed in) becomes readable, and
    return the list of sockets ready for reading. """
    return _wait_for_io_events(socks, EVENT_READ, timeout)
def wait_for_write(socks, timeout=None):
    """ Block up to ``timeout`` seconds until at least one of the given
    sockets (or the single socket passed in) becomes writable, and
    return the list of sockets ready for writing. """
    return _wait_for_io_events(socks, EVENT_WRITE, timeout)
| mit |
lilydjwg/udt_py | test_udt.py | 1 | 6280 | #!/usr/bin/env python3
import sys
import socket
import unittest
import udt
import _udt
class TestSocket(unittest.TestCase):
    """
    Exercises the udt socket wrapper: construction, socket option
    get/set, and epoll registration.  Requires the udt/_udt extension
    modules and uses the non-deprecated assertEqual/assertIsInstance
    names so the suite keeps running on Python >= 3.12 (the old
    assertEquals alias was removed there).
    """
    def create_socket(self):
        """Create a wrapped UDT socket."""
        return udt.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    def create_int_socket(self):
        """Create a raw (internal) _udt socket."""
        return _udt.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    def test_init(self):
        s = self.create_socket()
        self.assertEqual(s.family, socket.AF_INET)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        self.assertEqual(s.proto, 0)
    def test_not_enough_args_init(self):
        # NOTE(review): this calls udt.socket(()) -- a single tuple
        # argument, not zero arguments -- confirm that matches the intent.
        self.assertRaises(TypeError, udt.socket, ())
    def test_close(self):
        s = self.create_socket()
        # perhaps this should fail since it was never open?
        s.close()
    def test_double_close(self):
        s = self.create_socket()
        s.close()
        self.assertRaises(RuntimeError, s.close, ())
    def test_connect_bad_args(self):
        s = self.create_int_socket()
        # 0 args
        self.assertRaises(TypeError, s.connect, ())
        # 1 arg
        # NOTE(review): this actually invokes connect("localhost", 22),
        # i.e. two positional arguments -- confirm against the intent.
        self.assertRaises(TypeError, s.connect, "localhost", 22, )
        # string port
        self.assertRaises(TypeError, s.connect, ("localhost", "22"))
    def test_connect_no_listen(self):
        s = self.create_socket()
        self.assertRaises(RuntimeError, s.connect, ("127.0.0.1", 2344))
        self.assertRaises(RuntimeError, s.connect, ("localhost", 2344))
    def test_bind_ok(self):
        s = self.create_socket()
        s.bind(("127.0.0.1", 3333))
    def test_startup(self):
        udt.startup()
    def test_cleanup(self):
        udt.cleanup()
    def test_socket_fileno(self):
        s = self.create_socket()
        self.assertIsInstance(s.fileno(), int)
    def test_getset_sockopt_mss(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_MSS)
        self.assertEqual(val, 1500)
        s.setsockopt(0, udt.UDT_MSS, 1800)
        val = s.getsockopt(0, udt.UDT_MSS)
        self.assertEqual(val, 1800)
    def test_getset_sockopt_sndsyn(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_SNDSYN)
        self.assertEqual(val, True)
        s.setsockopt(0, udt.UDT_SNDSYN, False)
        val = s.getsockopt(0, udt.UDT_SNDSYN)
        self.assertEqual(val, False)
    def test_getset_sockopt_rcvsyn(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_RCVSYN)
        self.assertEqual(val, True)
    def test_getset_sockopt_fc(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_FC)
        self.assertEqual(val, 25600)
        s.setsockopt(0, udt.UDT_FC, 10000)
        val = s.getsockopt(0, udt.UDT_FC)
        self.assertEqual(val, 10000)
    def test_getset_sockopt_sndbuf(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_SNDBUF)
        self.assertEqual(val, 12058624)
        s.setsockopt(0, udt.UDT_SNDBUF, 198720)
        val = s.getsockopt(0, udt.UDT_SNDBUF)
        self.assertEqual(val, 198720)
    def test_getsockopt_rcvbuf(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_RCVBUF)
        self.assertEqual(val, 12058624)
        s.setsockopt(0, udt.UDT_RCVBUF, 198720)
        val = s.getsockopt(0, udt.UDT_RCVBUF)
        self.assertEqual(val, 198720)
    def test_getsockopt_udp_sndbuf(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDP_SNDBUF)
        s.setsockopt(0, udt.UDP_SNDBUF, 10000)
        val = s.getsockopt(0, udt.UDP_SNDBUF)
        self.assertEqual(val, 10000)
    def test_getsockopt_udp_rcvbuf(self):
        # Smoke test: only checks that reading the option succeeds.
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDP_RCVBUF)
    def test_getsockopt_snd_timeout(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_SNDTIMEO)
        self.assertEqual(val, -1)
    def test_getsockopt_rcv_timeout(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_RCVTIMEO)
        self.assertEqual(val, -1)
    def test_getsockopt_reuseaddr(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_REUSEADDR)
        self.assertEqual(val, True)
    def test_getsockopt_linger(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_LINGER)
        self.assertEqual(val, (1, 180))
    def test_getsockopt_max_bw(self):
        s = self.create_socket()
        val = s.getsockopt(0, udt.UDT_MAXBW)
        self.assertEqual(val, -1)
    def test_create_epoll(self):
        epoll = udt.epoll()
        self.assertTrue(epoll.eid)
    def test_epoll_release(self):
        epoll = udt.epoll()
        epoll.release()
    def test_epoll_double_release(self):
        epoll = udt.epoll()
        epoll.release()
        self.assertRaises(RuntimeError, epoll.release)
    def test_epoll_add_usock(self):
        epoll = udt.epoll()
        s = self.create_socket()
        self.assertEqual(0, epoll.add_usock(s.fileno(), udt.UDT_EPOLL_IN))
    def test_epoll_add_ssock(self):
        epoll = udt.epoll()
        s1, s2 = socket.socketpair()
        epoll.add_ssock(s1.fileno(), udt.UDT_EPOLL_IN)
    def test_epoll_remove_usock(self):
        epoll = udt.epoll()
        s = self.create_socket()
        epoll.add_usock(s.fileno(), udt.UDT_EPOLL_IN)
        epoll.remove_usock(s.fileno(), udt.UDT_EPOLL_IN)
    def test_epoll_remove_bad_usock(self):
        epoll = udt.epoll()
        s = self.create_socket()
        fileno = s.fileno()
        s.close()
        self.assertRaises(RuntimeError, epoll.remove_usock, fileno, udt.UDT_EPOLL_IN)
    # FIXME - broken functionality in UDT ?
    # (leading underscore keeps this test disabled)
    def _test_epoll_remove_ssock(self):
        epoll = udt.epoll()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('127.0.0.1', 22))
        self.assertEqual(
            epoll.add_ssock(s.fileno(), udt.UDT_EPOLL_IN),
            0
        )
        epoll.remove_ssock(s.fileno(), udt.UDT_EPOLL_IN)
    def test_epoll_wait(self):
        s = self.create_socket()
        epoll = udt.epoll()
        epoll.add_usock(s.fileno(), udt.UDT_EPOLL_IN)
        print(epoll.epoll_wait(0))
        print(epoll.epoll_wait(1))
if __name__ == '__main__':
    # Only run the suite when executed as a script; an unconditional
    # unittest.main() would run (and sys.exit) on mere import.
    unittest.main()
| bsd-3-clause |
bmakarenko/gost-crypto-gui | gostcryptogui/mainwindow.py | 1 | 8305 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Tue May 16 14:32:44 2017
# by: PyQt4 UI code generator 4.6.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
    """pyuic4-generated UI class for the gost-crypto-gui main window.

    Builds the four action buttons (sign / verify / encrypt / decrypt),
    the options menu tree and the status bar.  Do not hand-edit the
    widget construction: per the header above, this file is regenerated
    from mainwindow.ui and manual changes will be lost.
    """
    def setupUi(self, MainWindow):
        """Create and wire all widgets, menus and actions on MainWindow.

        Statement order matters here: widgets are created, configured,
        added to layouts/menus, then retranslateUi() installs the
        user-visible (Russian) strings.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(265, 276)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("/usr/share/pixmaps/gost-crypto-gui.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # The four main action buttons, stacked vertically.
        self.btnSign = QtGui.QPushButton(self.centralwidget)
        self.btnSign.setObjectName("btnSign")
        self.verticalLayout.addWidget(self.btnSign)
        self.btnVerify = QtGui.QPushButton(self.centralwidget)
        self.btnVerify.setObjectName("btnVerify")
        self.verticalLayout.addWidget(self.btnVerify)
        self.btnEncrypt = QtGui.QPushButton(self.centralwidget)
        self.btnEncrypt.setObjectName("btnEncrypt")
        self.verticalLayout.addWidget(self.btnEncrypt)
        self.btnDecrypt = QtGui.QPushButton(self.centralwidget)
        self.btnDecrypt.setObjectName("btnDecrypt")
        self.verticalLayout.addWidget(self.btnDecrypt)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: an options menu with four submenus.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 265, 22))
        self.menubar.setObjectName("menubar")
        self.menu = QtGui.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        self.cryptoprovider = QtGui.QMenu(self.menu)
        self.cryptoprovider.setObjectName("cryptoprovider")
        self.encoding = QtGui.QMenu(self.menu)
        self.encoding.setObjectName("encoding")
        self.sign_check = QtGui.QMenu(self.menu)
        self.sign_check.setObjectName("sign_check")
        self.dettached = QtGui.QMenu(self.menu)
        self.dettached.setObjectName("dettached")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Checkable actions; the checked/enabled flags below encode the
        # defaults (CryptoPro CSP provider, DER encoding, auto-verify on,
        # detached signature off).
        self.action_CSP = QtGui.QAction(MainWindow)
        self.action_CSP.setCheckable(True)
        self.action_CSP.setChecked(True)
        self.action_CSP.setObjectName("action_CSP")
        self.actionOpenSSL = QtGui.QAction(MainWindow)
        self.actionOpenSSL.setCheckable(True)
        self.actionOpenSSL.setEnabled(False)
        self.actionOpenSSL.setObjectName("actionOpenSSL")
        self.actionDER = QtGui.QAction(MainWindow)
        self.actionDER.setCheckable(True)
        self.actionDER.setChecked(True)
        self.actionDER.setObjectName("actionDER")
        self.actionBase64 = QtGui.QAction(MainWindow)
        self.actionBase64.setCheckable(True)
        self.actionBase64.setObjectName("actionBase64")
        self.actionSignCheckOn = QtGui.QAction(MainWindow)
        self.actionSignCheckOn.setCheckable(True)
        self.actionSignCheckOn.setChecked(True)
        self.actionSignCheckOn.setObjectName("actionSignCheckOn")
        self.actionSignCheckOff = QtGui.QAction(MainWindow)
        self.actionSignCheckOff.setCheckable(True)
        self.actionSignCheckOff.setChecked(False)
        self.actionSignCheckOff.setObjectName("actionSignCheckOff")
        self.actionDettachedOn = QtGui.QAction(MainWindow)
        self.actionDettachedOn.setCheckable(True)
        self.actionDettachedOn.setObjectName("actionDettachedOn")
        self.actionDettachedOff = QtGui.QAction(MainWindow)
        self.actionDettachedOff.setCheckable(True)
        self.actionDettachedOff.setChecked(True)
        self.actionDettachedOff.setObjectName("actionDettachedOff")
        # Assemble the menu tree.
        self.cryptoprovider.addAction(self.action_CSP)
        self.cryptoprovider.addAction(self.actionOpenSSL)
        self.encoding.addAction(self.actionDER)
        self.encoding.addAction(self.actionBase64)
        self.sign_check.addAction(self.actionSignCheckOn)
        self.sign_check.addAction(self.actionSignCheckOff)
        self.dettached.addAction(self.actionDettachedOn)
        self.dettached.addAction(self.actionDettachedOff)
        self.menu.addAction(self.cryptoprovider.menuAction())
        self.menu.addAction(self.encoding.menuAction())
        self.menu.addAction(self.sign_check.menuAction())
        self.menu.addAction(self.dettached.menuAction())
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Install all user-visible strings (Russian UI text)."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "gost-crypto-gui", None, QtGui.QApplication.UnicodeUTF8))
        self.btnSign.setStatusTip(QtGui.QApplication.translate("MainWindow", "Подписать указанный файл", None, QtGui.QApplication.UnicodeUTF8))
        self.btnSign.setText(QtGui.QApplication.translate("MainWindow", "Подписать файл(ы)", None, QtGui.QApplication.UnicodeUTF8))
        self.btnVerify.setStatusTip(QtGui.QApplication.translate("MainWindow", "Проверить ЭЦП подписанного файла", None, QtGui.QApplication.UnicodeUTF8))
        self.btnVerify.setText(QtGui.QApplication.translate("MainWindow", "Проверить подпись", None, QtGui.QApplication.UnicodeUTF8))
        self.btnEncrypt.setStatusTip(QtGui.QApplication.translate("MainWindow", "Зашифровать указанный файл", None, QtGui.QApplication.UnicodeUTF8))
        self.btnEncrypt.setText(QtGui.QApplication.translate("MainWindow", "Зашифровать файл(ы)", None, QtGui.QApplication.UnicodeUTF8))
        self.btnDecrypt.setStatusTip(QtGui.QApplication.translate("MainWindow", "Расшифровать файл", None, QtGui.QApplication.UnicodeUTF8))
        self.btnDecrypt.setText(QtGui.QApplication.translate("MainWindow", "Расшифровать файл(ы)", None, QtGui.QApplication.UnicodeUTF8))
        self.menu.setTitle(QtGui.QApplication.translate("MainWindow", "Опции", None, QtGui.QApplication.UnicodeUTF8))
        self.cryptoprovider.setStatusTip(QtGui.QApplication.translate("MainWindow", "Выбрать используемый криптопровайдер", None, QtGui.QApplication.UnicodeUTF8))
        self.cryptoprovider.setTitle(QtGui.QApplication.translate("MainWindow", "Криптопровайдер", None, QtGui.QApplication.UnicodeUTF8))
        self.encoding.setTitle(QtGui.QApplication.translate("MainWindow", "Кодировка файлов", None, QtGui.QApplication.UnicodeUTF8))
        self.sign_check.setTitle(QtGui.QApplication.translate("MainWindow", "Авт. проверка подписи", None, QtGui.QApplication.UnicodeUTF8))
        self.dettached.setTitle(QtGui.QApplication.translate("MainWindow", "Отсоединенная подпись", None, QtGui.QApplication.UnicodeUTF8))
        self.action_CSP.setText(QtGui.QApplication.translate("MainWindow", "КриптоПро CSP", None, QtGui.QApplication.UnicodeUTF8))
        self.actionOpenSSL.setText(QtGui.QApplication.translate("MainWindow", "OpenSSL", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDER.setText(QtGui.QApplication.translate("MainWindow", "DER", None, QtGui.QApplication.UnicodeUTF8))
        self.actionBase64.setText(QtGui.QApplication.translate("MainWindow", "base64", None, QtGui.QApplication.UnicodeUTF8))
        self.actionSignCheckOn.setText(QtGui.QApplication.translate("MainWindow", "Включено", None, QtGui.QApplication.UnicodeUTF8))
        self.actionSignCheckOff.setText(QtGui.QApplication.translate("MainWindow", "Выключено", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDettachedOn.setText(QtGui.QApplication.translate("MainWindow", "Включено", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDettachedOff.setText(QtGui.QApplication.translate("MainWindow", "Выключено", None, QtGui.QApplication.UnicodeUTF8))
| mit |
rajsadho/django | tests/generic_views/test_list.py | 309 | 12129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
    """Integration tests for Django's generic ListView.

    Exercises pagination (query-string and URL-kwarg page selection,
    custom paginator classes, orphans), template/context-name resolution
    and allow_empty behaviour against the URLconf, models and templates
    of the generic_views test app.
    """
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: two authors, two books (one linked to an
        # author) and a page, created once per test class.
        cls.artist1 = Artist.objects.create(name='Rene Magritte')
        cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
        cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
        cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
        cls.book1.authors.add(cls.author1)
        cls.book2 = Book.objects.create(
            name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
        )
        cls.page1 = Page.objects.create(
            content='I was once bitten by a moose.', template='generic_views/page_template.html'
        )
    def test_items(self):
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')
    def test_queryset(self):
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIsInstance(res.context['view'], View)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])
    def test_paginated_queryset(self):
        # The paginated view uses paginate_by=30 -> 100 authors give 4 pages.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])
    def test_paginated_get_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)
    def test_paginated_get_last_page_by_query_string(self):
        # ?page=last is resolved by ListView to the final page.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)
    def test_paginated_get_page_by_urlvar(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)
    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)
    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)
    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)
    def test_paginated_custom_page_kwarg(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)
    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)
    def test_paginated_orphaned_queryset(self):
        # 92 authors with orphans configured: the last partial page is
        # merged into the previous one, giving 3 pages instead of 4.
        self._make_authors(92)
        res = self.client.get('/list/authors/paginated-orphaned/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 1)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '3'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '4'})
        self.assertEqual(res.status_code, 404)
    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)
    def test_verbose_name(self):
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])
    def test_allow_empty_false(self):
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)
    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')
    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')
    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
    def test_duplicate_context_object_name(self):
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
    def test_missing_items(self):
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')
    def test_explicitly_ordered_list_view(self):
        Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
        res = self.client.get('/list/books/sorted/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object_list'][0].name, '2066')
        self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
        self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
        res = self.client.get('/list/books/sortedbypagesandnamedec/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
        self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
        self.assertEqual(res.context['object_list'][2].name, '2066')
    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")
    def _make_authors(self, n):
        """Replace all authors with n deterministically named ones
        ('Author 00' ... ), so page contents are predictable."""
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| bsd-3-clause |
dittert/pyprobe | src/pyprobe/sensors/pegasus/sensor_enclosure.py | 1 | 3128 | # coding=utf-8
import platform
import subprocess
from pyprobe.sensors import *
from pyprobe.sensors.pegasus.enclosure import EnclosureParser
from pyprobe.sensors.pegasus.helper import determine_executable, determine_controllers
from pyprobe.sensors.process_helper import get_outputs_of_process
from pyprobe.utils import to_bytes
__author__ = 'Dirk Dittert'
class PegasusEnclosureSensor(BaseSensor):
    """
    Monitors a Promise Pegasus RAID enclosure (OS X only).

    Queries the enclosure via the configured command line tool, fails
    with a SensorError when the tool errors out, the fan is too slow or
    a temperature threshold is exceeded, and otherwise reports each
    temperature sensor plus the fan speed as channels.
    """
    KIND = u"pegasusenclosure"
    # Error codes reported through SensorError.
    ERROR_CODE_EXECUTION_ERROR = 1
    ERROR_CODE_FAN_ERROR = 2
    ERROR_CODE_TEMPERATURE_ERROR = 3
    def define(self, configuration):
        """
        Describe this sensor, or return None when it is not applicable
        (non-OS-X host, or not exactly one controller configured).
        """
        if platform.system() != "Darwin":
            # sensor is only applicable on OS X systems.
            return None
        controllers = determine_controllers(configuration)
        if len(controllers) != 1:
            # no controllers available in system
            return None
        result = SensorDescription(u"Promise Pegasus Enclosure", self.KIND)
        result.description = u"Monitort ein Promise Pegasus RAID-Gehäuse."
        return result
    def execute(self, sensorid, host, parameters, configuration):
        """
        Run the enclosure query and return a SensorResult with one
        channel per temperature sensor plus the fan speed, or a
        SensorError on tool failure / threshold violation.
        """
        executable = determine_executable(configuration)
        # NOTE(review): shell=True with an interpolated executable path --
        # a path containing shell metacharacters would be interpreted by
        # the shell; consider passing an argv list with shell=False.
        command = u"LC_ALL=C {0} -C enclosure -e 1 -v".format(executable)
        proc = subprocess.Popen(to_bytes(command), shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, out = get_outputs_of_process(proc)
        if proc.returncode != 0:
            message = u"Ausführung von {} ist mit Fehlercode {} gescheitert.".format(executable, proc.returncode)
            return SensorError(sensorid, self.ERROR_CODE_EXECUTION_ERROR, message, ErrorType.RESPONSE)
        parser = EnclosureParser(out)
        # Check fan speed
        fan = parser.fan()
        if not fan.ok():
            if fan.speed is not None:
                message = u"Lüfterdrehzahl {} RPM ist unterhalb " \
                          u"der Minimaldrehzahl von {} RPM.".format(fan.speed, fan.threshold)
            else:
                message = u"Lüfterdrehzahl konnte nicht ermittelt werden. Möglicher Lüfterdefekt!"
            return SensorError(sensorid, self.ERROR_CODE_FAN_ERROR, message, ErrorType.DATA)
        # Check temperature thresholds
        for sensor in parser.temperatures():
            if not sensor.ok():
                message = u"Temperatur {}°C von Sensor '{}' " \
                          u"liegt über dem Grenzwert {}°C " \
                          u"(Sensorstatus: {}).".format(sensor.value, sensor.name, sensor.threshold, sensor.status)
                return SensorError(sensorid, self.ERROR_CODE_TEMPERATURE_ERROR, message, ErrorType.DATA)
        # All checks passed: report every temperature plus the fan speed.
        result = SensorResult(sensorid)
        for sensor in parser.temperatures():
            name = u"{} Temperatur".format(sensor.name)
            channel = SensorChannel(name, ModeType.INTEGER, ValueType.TEMPERATURE, sensor.value)
            result.channel.append(channel)
        channel = SensorChannel(u"Lüfterdrehzahl", ModeType.INTEGER, ValueType.CUSTOM, fan.speed, u"RPM")
        result.channel.append(channel)
        # Fixed: the original final line was corrupted by fused dataset
        # residue ("return result | apache-2.0 |"), a syntax error.
        return result
fightingwalrus/gerbmerge | gerbmerge/config.py | 4 | 14366 | #!/usr/bin/env python
"""
Parse the GerbMerge configuration file.
--------------------------------------------------------------------
This program is licensed under the GNU General Public License (GPL)
Version 3. See http://www.fsf.org for details of the license.
Rugged Circuits LLC
http://ruggedcircuits.com/gerbmerge
"""
import sys
import ConfigParser
import re
import string
import jobs
import aptable
# Configuration dictionary. Specify floats as strings. Ints can be specified
# as ints or strings.
# NOTE: parseConfigFile() later coerces any value that parses as an int or
# float into a number, so the string forms below are only initial defaults.
# All dimensions are in inches.
Config = {
    'xspacing': '0.125',              # Spacing in horizontal direction
    'yspacing': '0.125',              # Spacing in vertical direction
    'panelwidth': '12.6',             # X-Dimension maximum panel size (Olimex)
    'panelheight': '7.8',             # Y-Dimension maximum panel size (Olimex)
    'cropmarklayers': None,           # e.g., *toplayer,*bottomlayer
    'cropmarkwidth': '0.01',          # Width (inches) of crop lines
    'cutlinelayers': None,            # as for cropmarklayers
    'cutlinewidth': '0.01',           # Width (inches) of cut lines
    'minimumfeaturesize': 0,          # Minimum dimension for selected layers
    'toollist': None,                 # Name of file containing default tool list
    'drillclustertolerance': '.002',  # Tolerance for clustering drill sizes
    'allowmissinglayers': 0,          # Set to 1 to allow multiple jobs to have non-matching layers
    'fabricationdrawingfile': None,   # Name of file to which to write fabrication drawing, or None
    'fabricationdrawingtext': None,   # Name of file containing text to write to fab drawing
    'excellondecimals': 4,            # Number of digits after the decimal point in input Excellon files
    'excellonleadingzeros': 0,        # Generate leading zeros in merged Excellon output file
    'outlinelayerfile': None,         # Name of file to which to write simple box outline, or None
    'scoringfile': None,              # Name of file to which to write scoring data, or None
    'leftmargin': 0,                  # Inches of extra room to leave on left side of panel for tooling
    'topmargin': 0,                   # Inches of extra room to leave on top side of panel for tooling
    'rightmargin': 0,                 # Inches of extra room to leave on right side of panel for tooling
    'bottommargin': 0,                # Inches of extra room to leave on bottom side of panel for tooling
    'fiducialpoints': None,           # List of X,Y co-ordinates at which to draw fiducials
    'fiducialcopperdiameter': 0.08,   # Diameter of copper part of fiducial
    'fiducialmaskdiameter': 0.32,     # Diameter of fiducial soldermask opening
}

# This dictionary is indexed by lowercase layer name and has as values a file
# name to use for the output.
# Entries here are overridden by the [MergeOutputFiles] config section.
MergeOutputFiles = {
    'boardoutline': 'merged.boardoutline.ger',
    'drills': 'merged.drills.xln',
    'placement': 'merged.placement.txt',
    'toollist': 'merged.toollist.drl'
}

# The global aperture table, indexed by aperture code (e.g., 'D10')
GAT = {}

# The global aperture macro table, indexed by macro name (e.g., 'M3', 'M4R' for rotated macros)
GAMT = {}

# The list of all jobs loaded, indexed by job name (e.g., 'PowerBoard')
Jobs = {}

# The set of all Gerber layer names encountered in all jobs. Doesn't
# include drills.
LayerList = {'boardoutline': 1}

# The tool list as read in from the DefaultToolList file in the configuration
# file. This is a dictionary indexed by tool name (e.g., 'T03') and
# a floating point number as the value, the drill diameter in inches.
DefaultToolList = {}

# The GlobalToolMap dictionary maps tool name to diameter in inches. It
# is initially empty and is constructed after all files are read in. It
# only contains actual tools used in jobs.
GlobalToolMap = {}

# The GlobalToolRMap dictionary is a reverse dictionary of ToolMap, i.e., it maps
# diameter to tool name.
GlobalToolRMap = {}

##############################################################################

# This configuration option determines whether trimGerber() is called
TrimGerber = 1

# This configuration option determines whether trimExcellon() is called
TrimExcellon = 1

# This configuration option determines the minimum size of feature dimensions for
# each layer. It is a dictionary indexed by layer name (e.g. '*topsilkscreen') and
# has a floating point number as the value (in inches).
# Populated by parseConfigFile() from the MinimumFeatureSize option.
MinimumFeatureDimension = {}

# This configuration option is a positive integer that determines the maximum
# amout of time to allow for random placements (seconds). A SearchTimeout of 0
# indicates that no timeout should occur and random placements will occur
# forever until a KeyboardInterrupt is raised.
SearchTimeout = 0
# Construct the reverse-GAT/GAMT translation table, keyed by aperture/aperture macro
# hash string. The value is the aperture code (e.g., 'D10') or macro name (e.g., 'M5').
def buildRevDict(D):
    """Invert an aperture (or macro) table.

    Given a dict whose values expose a hash() method, return a new dict
    mapping each value's hash string back to its key (e.g. the aperture
    code 'D10' or macro name 'M5'). On hash collisions the last entry
    encountered wins, as with a plain assignment loop.
    """
    return dict((entry.hash(), code) for code, entry in D.items())
def parseStringList(L):
    """Parse a delimited option string like "*toplayer, *bottomlayer" into a
    list of names.

    One pair of matching surrounding quotes (single or double) is stripped
    if present, then the string is split on commas or semicolons, discarding
    whitespace around each delimiter. Returns a list of strings.

    (The original also carried an unreachable ``if 0:`` block performing the
    same quote-stripping by hand with Python-2-only raise syntax; that dead
    code has been removed.)
    """
    # This pattern matches quotes at the beginning and end...quotes must match.
    # NOTE: inside a character class, \1 is the literal control character
    # \x01, not a backreference, so [^\1] means "any char except \x01".
    # That is close enough for config strings; kept as-is to preserve
    # behavior.
    quotepat = re.compile(r'^([' "'" '"' r']?)([^\1]*)\1$')
    delimitpat = re.compile(r'[ \t]*[,;][ \t]*')

    match = quotepat.match(L)
    if match:
        L = match.group(2)

    return delimitpat.split(L)
# Parse an Excellon tool list file of the form
#
# T01 0.035in
# T02 0.042in
def parseToolList(fname):
    """Parse an Excellon tool list file into {tool name: diameter-in-inches}.

    Accepted line formats (blank lines and '#'/';' comment lines skipped):
        T01 0.035in
        T02 0.9mm
        T03 35mil     (bare numbers are also treated as mil)
    Tool names are canonicalized to two digits ('T1' -> 'T01'); mm and mil
    sizes are converted to inches. Raises RuntimeError on unopenable files,
    unparsable sizes, or duplicate tool definitions.

    NOTE: Python-2-only constructs throughout (file(), "except E, d",
    xreadlines(), has_key()).
    """
    TL = {}
    try:
        fid = file(fname, 'rt')
    except Exception, detail:
        raise RuntimeError, "Unable to open tool list file '%s':\n %s" % (fname, str(detail))

    # Patterns are tried in order: explicit 'in', explicit 'mm', then
    # optional 'mil' (which also matches a bare number).
    pat_in = re.compile(r'\s*(T\d+)\s+([0-9.]+)\s*in\s*')
    pat_mm = re.compile(r'\s*(T\d+)\s+([0-9.]+)\s*mm\s*')
    pat_mil = re.compile(r'\s*(T\d+)\s+([0-9.]+)\s*(?:mil)?')

    for line in fid.xreadlines():
        line = string.strip(line)
        if (not line) or (line[0] in ('#', ';')): continue

        # Track which unit matched so the size can be converted below.
        mm = 0
        mil = 0
        match = pat_in.match(line)
        if not match:
            mm = 1
            match = pat_mm.match(line)
            if not match:
                mil = 1
                match = pat_mil.match(line)
                if not match:
                    # Unrecognized lines are silently skipped.
                    continue
                    #raise RuntimeError, "Illegal tool list specification:\n %s" % line

        tool, size = match.groups()

        try:
            size = float(size)
        except:
            raise RuntimeError, "Tool size in file '%s' is not a valid floating-point number:\n %s" % (fname,line)

        if mil:
            size = size*0.001 # Convert mil to inches
        elif mm:
            size = size/25.4 # Convert mm to inches

        # Canonicalize tool so that T1 becomes T01
        tool = 'T%02d' % int(tool[1:])

        if TL.has_key(tool):
            raise RuntimeError, "Tool '%s' defined more than once in tool list file '%s'" % (tool,fname)

        TL[tool]=size
    fid.close()

    return TL
# This function parses the job configuration file and does
# everything needed to:
#
# * parse global options and store them in the Config dictionary
# as natural types (i.e., ints, floats, lists)
#
# * Read Gerber/Excellon data and populate the Jobs dictionary
#
# * Read Gerber/Excellon data and populate the global aperture
# table, GAT, and the global aperture macro table, GAMT
#
# * read the tool list file and populate the DefaultToolList dictionary
def parseConfigFile(fname, Config=Config, Jobs=Jobs):
    """Parse the GerbMerge job configuration file ``fname``.

    Side effects, all on module-level globals:
      * [Options] keys overwrite entries in Config; values that parse as
        int/float are coerced to numbers, and the list-valued options
        (CutLineLayers, CropMarkLayers, MinimumFeatureSize) are split up.
      * [MergeOutputFiles] entries overwrite MergeOutputFiles.
      * Every other section is treated as a job: its Gerber/Excellon files
        are read, populating Jobs, LayerList, the global aperture tables
        (via aptable.constructApertureTable) and DefaultToolList.

    Returns the ConfigParser instance. Raises RuntimeError on malformed
    configuration or, unless AllowMissingLayers is set, on jobs that lack
    layers present in other jobs.

    NOTE: Python-2-only syntax throughout (print statements, has_key,
    "raise E, msg").
    """
    global DefaultToolList

    CP = ConfigParser.ConfigParser()
    CP.readfp(file(fname,'rt'))

    # First parse global options
    if CP.has_section('Options'):
        for opt in CP.options('Options'):
            # Is it one we expect
            if Config.has_key(opt):
                # Yup...override it
                Config[opt] = CP.get('Options', opt)

            elif CP.defaults().has_key(opt):
                pass # Ignore DEFAULTS section keys

            elif opt in ('fabricationdrawing', 'outlinelayer'):
                # Renamed pre-1.0 options: refuse to run so the user updates
                # the configuration file rather than silently ignoring them.
                print '*'*73
                print '\nThe FabricationDrawing and OutlineLayer configuration options have been'
                print 'renamed as of GerbMerge version 1.0. Please consult the documentation for'
                print 'a description of the new options, then modify your configuration file.\n'
                print '*'*73
                sys.exit(1)
            else:
                raise RuntimeError, "Unknown option '%s' in [Options] section of configuration file" % opt
    else:
        raise RuntimeError, "Missing [Options] section in configuration file"

    # Ensure we got a tool list
    if not Config.has_key('toollist'):
        raise RuntimeError, "INTERNAL ERROR: Missing tool list assignment in [Options] section"

    # Make integers integers, floats floats
    for key,val in Config.items():
        try:
            val = int(val)
            Config[key]=val
        except:
            try:
                val = float(val)
                Config[key]=val
            except:
                pass

    # Process lists of strings
    if Config['cutlinelayers']:
        Config['cutlinelayers'] = parseStringList(Config['cutlinelayers'])
    if Config['cropmarklayers']:
        Config['cropmarklayers'] = parseStringList(Config['cropmarklayers'])

    # Process list of minimum feature dimensions
    # Format is alternating "layername,size" pairs, e.g.
    # "*topsilkscreen,0.008,*bottomsilkscreen,0.008".
    if Config['minimumfeaturesize']:
        temp = Config['minimumfeaturesize'].split(",")
        try:
            for index in range(0, len(temp), 2):
                MinimumFeatureDimension[ temp[index] ] = float( temp[index + 1] )
        except:
            raise RuntimeError, "Illegal configuration string:" + Config['minimumfeaturesize']

    # Process MergeOutputFiles section to set output file names
    if CP.has_section('MergeOutputFiles'):
        for opt in CP.options('MergeOutputFiles'):
            # Each option is a layer name and the output file for this name
            if opt[0]=='*' or opt in ('boardoutline', 'drills', 'placement', 'toollist'):
                MergeOutputFiles[opt] = CP.get('MergeOutputFiles', opt)

    # Now, we go through all jobs and collect Gerber layers
    # so we can construct the Global Aperture Table.
    apfiles = []

    for jobname in CP.sections():
        if jobname=='Options': continue
        if jobname=='MergeOutputFiles': continue
        if jobname=='GerbMergeGUI': continue

        # Ensure all jobs have a board outline
        if not CP.has_option(jobname, 'boardoutline'):
            raise RuntimeError, "Job '%s' does not have a board outline specified" % jobname

        if not CP.has_option(jobname, 'drills'):
            raise RuntimeError, "Job '%s' does not have a drills layer specified" % jobname

        for layername in CP.options(jobname):
            if layername[0]=='*' or layername=='boardoutline':
                fname = CP.get(jobname, layername)
                apfiles.append(fname)

                if layername[0]=='*':
                    LayerList[layername]=1

    # Now construct global aperture tables, GAT and GAMT. This step actually
    # reads in the jobs for aperture data but doesn't store Gerber
    # data yet.
    aptable.constructApertureTable(apfiles)
    del apfiles

    # Disabled debugging dump of the aperture macro table.
    if 0:
        keylist = GAMT.keys()
        keylist.sort()
        for key in keylist:
            print '%s' % GAMT[key]
        sys.exit(0)

    # Parse the tool list
    if Config['toollist']:
        DefaultToolList = parseToolList(Config['toollist'])

    # Now get jobs. Each job implies layer names, and we
    # expect consistency in layer names from one job to the
    # next. Two reserved layer names, however, are
    # BoardOutline and Drills.
    Jobs.clear()

    do_abort = 0
    errstr = 'ERROR'
    if Config['allowmissinglayers']:
        errstr = 'WARNING'

    for jobname in CP.sections():
        if jobname=='Options': continue
        if jobname=='MergeOutputFiles': continue
        if jobname=='GerbMergeGUI': continue

        print 'Reading data from', jobname, '...'

        J = jobs.Job(jobname)

        # Parse the job settings, like tool list, first, since we are not
        # guaranteed to have ConfigParser return the layers in the same order that
        # the user wrote them, and we may get Gerber files before we get a tool
        # list! Same thing goes for ExcellonDecimals. We need to know what this is
        # before parsing any Excellon files.
        for layername in CP.options(jobname):
            fname = CP.get(jobname, layername)

            if layername == 'toollist':
                J.ToolList = parseToolList(fname)
            elif layername=='excellondecimals':
                try:
                    J.ExcellonDecimals = int(fname)
                except:
                    raise RuntimeError, "Excellon decimals '%s' in config file is not a valid integer" % fname
            elif layername=='repeat':
                try:
                    J.Repeat = int(fname)
                except:
                    raise RuntimeError, "Repeat count '%s' in config file is not a valid integer" % fname

        # Second pass: actually read the layer data now that per-job
        # settings are known.
        for layername in CP.options(jobname):
            fname = CP.get(jobname, layername)

            if layername=='boardoutline':
                J.parseGerber(fname, layername, updateExtents=1)
            elif layername[0]=='*':
                J.parseGerber(fname, layername, updateExtents=0)
            elif layername=='drills':
                J.parseExcellon(fname)

        # Emit warnings if some layers are missing
        LL = LayerList.copy()
        for layername in J.apxlat.keys():
            assert LL.has_key(layername)
            del LL[layername]

        if LL:
            if errstr=='ERROR':
                do_abort=1

            print '%s: Job %s is missing the following layers:' % (errstr, jobname)
            for layername in LL.keys():
                print ' %s' % layername

        # Store the job in the global Jobs dictionary, keyed by job name
        Jobs[jobname] = J

    if do_abort:
        raise RuntimeError, 'Exiting since jobs are missing layers. Set AllowMissingLayers=1\nto override.'
if __name__=="__main__":
CP = parseConfigFile(sys.argv[1])
print Config
sys.exit(0)
if 0:
for key, val in CP.defaults().items():
print '%s: %s' % (key,val)
for section in CP.sections():
print '[%s]' % section
for opt in CP.options(section):
print ' %s=%s' % (opt, CP.get(section, opt))
| gpl-3.0 |
miguelfervi/SSBW-Restaurantes | restaurantes/lib/python2.7/site-packages/django/db/models/fields/__init__.py | 24 | 88809 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.query_utils import QueryWrapper, RegisterLookupMixin
from django.utils import six, timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import (
RemovedInDjango20Warning, warn_about_renamed_method,
)
from django.utils.duration import duration_string
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import Promise, cached_property, curry
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty',
'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
    """Bare placeholder class; instances get their __class__ reassigned
    (see _empty() and Field.__copy__) to build objects without __init__."""
    pass
class NOT_PROVIDED:
    """Sentinel marking that no explicit default was supplied for a field
    (distinguishes "no default" from an explicit default of None)."""
    pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
    """Look up a field instance via the app registry; used by
    Field.__reduce__ so unpickling yields the field attached to its model
    rather than a detached copy."""
    return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
    """Create an instance of ``of_cls`` without invoking its __init__,
    by building a bare Empty object and reassigning its class."""
    instance = Empty()
    instance.__class__ = of_cls
    return instance
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.Iterator):
choices = list(choices)
self.choices = choices or []
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
@property
def rel(self):
warnings.warn(
"Usage of field.rel has been deprecated. Use field.remote_field instead.",
RemovedInDjango20Warning, 2)
return self.remote_field
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model=self.model):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
something else than None, then the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
    """
    Convert *value* to the field's Python type and run full validation.

    Validation errors from to_python() and validate() are propagated
    unchanged; the converted value is returned when no error is raised.
    """
    python_value = self.to_python(value)
    self.validate(python_value, model_instance)
    self.run_validators(python_value)
    return python_value
def db_type(self, connection):
    """
    Returns the database column data type for this field, for the provided
    connection. Returns None when the backend has no mapping for this
    field's internal type (e.g. for relation-only fields).
    """
    # The default implementation of this method looks at the
    # backend-specific data_types dictionary, looking up the field by its
    # "internal type".
    #
    # A Field class can implement the get_internal_type() method to specify
    # which *preexisting* Django Field class it's most similar to -- i.e.,
    # a custom field might be represented by a TEXT column type, which is
    # the same as the TextField Django field type, which means the custom
    # field's get_internal_type() returns 'TextField'.
    #
    # But the limitation of the get_internal_type() / data_types approach
    # is that it cannot handle database column types that aren't already
    # mapped to one of the built-in Django field types. In this case, you
    # can implement db_type() instead of get_internal_type() to specify
    # exactly which wacky database column type you want to use.
    data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
    try:
        # The type template may interpolate field attributes (e.g.
        # max_length) via the DictWrapper.
        return connection.data_types[self.get_internal_type()] % data
    except KeyError:
        return None
def db_parameters(self, connection):
    """
    Extension of db_type(), providing a range of different return
    values (type, checks).
    This will look at db_type(), allowing custom model fields to override it.

    Returns a dict with keys "type" (column type string or None) and
    "check" (check-constraint SQL fragment or None).
    """
    data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
    type_string = self.db_type(connection)
    try:
        check_string = connection.data_type_check_constraints[self.get_internal_type()] % data
    except KeyError:
        # Most field types carry no backend check constraint.
        check_string = None
    return {
        "type": type_string,
        "check": check_string,
    }
def db_type_suffix(self, connection):
    """
    Return the backend-specific column-type suffix for this field's
    internal type (e.g. an auto-increment clause), or None.
    """
    suffix_map = connection.data_types_suffix
    return suffix_map.get(self.get_internal_type())
def get_db_converters(self, connection):
    """
    Return the list of converters applied to values loaded from the
    database: the field's from_db_value hook when defined, else nothing.
    """
    if not hasattr(self, 'from_db_value'):
        return []
    return [self.from_db_value]
@property
def unique(self):
    """True when the field enforces uniqueness, either explicitly or
    implicitly by being the primary key."""
    if self._unique:
        return self._unique
    return self.primary_key
def set_attributes_from_name(self, name):
    """
    Derive the name-dependent attributes (name, attname, column, concrete,
    verbose_name) once the field learns its attribute *name* on the model.
    """
    if not self.name:
        self.name = name
    self.attname, self.column = self.get_attname_column()
    # A field is "concrete" when it maps to an actual database column.
    self.concrete = self.column is not None
    if self.verbose_name is None and self.name:
        # Default verbose_name: attribute name with underscores as spaces.
        self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
    """
    Register this field on model class *cls* under attribute *name*.

    Records the field in cls._meta (as a virtual field when virtual_only
    is True) and, when choices are defined, installs the
    get_<name>_display() helper on the model.
    """
    self.set_attributes_from_name(name)
    self.model = cls
    if virtual_only:
        cls._meta.add_field(self, virtual=True)
    else:
        cls._meta.add_field(self)
    if self.choices:
        setattr(cls, 'get_%s_display' % self.name,
                curry(cls._get_FIELD_display, field=self))
def get_filter_kwargs_for_object(self, obj):
    """
    Return a dict that when passed as kwargs to self.model.filter(), would
    yield all instances having the same value for this field as obj has.
    """
    field_value = getattr(obj, self.attname)
    return {self.name: field_value}
def get_attname(self):
    """Return the attribute name used on model instances for this field."""
    return self.name
def get_attname_column(self):
    """
    Return the (attname, column) pair for this field. The column name
    defaults to the attribute name unless db_column was provided.
    """
    attname = self.get_attname()
    return attname, self.db_column or attname
def get_cache_name(self):
    """Return the instance-dict key used to cache this field's value."""
    return '_{}_cache'.format(self.name)
def get_internal_type(self):
    """Return the name used to look up backend column types for this
    field; defaults to the concrete class name."""
    return type(self).__name__
def pre_save(self, model_instance, add):
    """
    Returns field's value just before saving. *add* is True when the
    instance is being inserted for the first time; the base
    implementation ignores it and simply reads the current attribute.
    """
    return getattr(model_instance, self.attname)
def get_prep_value(self, value):
    """
    Perform preliminary non-db specific value checks and conversions.
    """
    # Resolve lazy objects (e.g. lazy translations) to their concrete
    # value before handing them further down the stack.
    if isinstance(value, Promise):
        value = value._proxy____cast()
    return value
def get_db_prep_value(self, value, connection, prepared=False):
    """
    Returns field's value prepared for interacting with the database
    backend.

    Used by the default implementations of ``get_db_prep_save`` and
    ``get_db_prep_lookup``.
    """
    if not prepared:
        # Callers pass prepared=True when get_prep_value() already ran.
        value = self.get_prep_value(value)
    return value
def get_db_prep_save(self, value, connection):
    """
    Return this field's value prepared for saving into the database
    through *connection*.
    """
    db_value = self.get_db_prep_value(value, connection=connection,
                                      prepared=False)
    return db_value
def get_prep_lookup(self, lookup_type, value):
    """
    Perform preliminary non-db specific lookup checks and conversions
    for *value* according to *lookup_type*.
    """
    # Query-like objects (e.g. subqueries) prepare themselves.
    if hasattr(value, '_prepare'):
        return value._prepare(self)
    # Text/pattern lookups pass the value through untouched.
    if lookup_type in {
            'iexact', 'contains', 'icontains',
            'startswith', 'istartswith', 'endswith', 'iendswith',
            'isnull', 'search', 'regex', 'iregex',
    }:
        return value
    elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
        return self.get_prep_value(value)
    elif lookup_type in ('range', 'in'):
        # Sequence lookups: prepare each member individually.
        return [self.get_prep_value(v) for v in value]
    return self.get_prep_value(value)
def get_db_prep_lookup(self, lookup_type, value, connection,
                       prepared=False):
    """
    Returns field's value prepared for database lookup, normally as a
    list of parameters (or a QueryWrapper for SQL-producing values).
    """
    if not prepared:
        value = self.get_prep_lookup(lookup_type, value)
        prepared = True
    if hasattr(value, 'get_compiler'):
        # A queryset used as a lookup value: bind its compiler.
        value = value.get_compiler(connection=connection)
    if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
        # If the value has a relabeled_clone method it means the
        # value will be handled later on.
        if hasattr(value, 'relabeled_clone'):
            return value
        if hasattr(value, 'as_sql'):
            sql, params = value.as_sql()
        else:
            sql, params = value._as_sql(connection=connection)
        return QueryWrapper(('(%s)' % sql), params)
    if lookup_type in ('search', 'regex', 'iregex', 'contains',
                       'icontains', 'iexact', 'startswith', 'endswith',
                       'istartswith', 'iendswith'):
        # Pattern lookups: a single raw parameter.
        return [value]
    elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
        return [self.get_db_prep_value(value, connection=connection,
                                       prepared=prepared)]
    elif lookup_type in ('range', 'in'):
        return [self.get_db_prep_value(v, connection=connection,
                                       prepared=prepared) for v in value]
    elif lookup_type == 'isnull':
        # IS NULL takes no parameters.
        return []
    else:
        return [value]
def has_default(self):
    """
    Returns a boolean of whether this field has a default value.
    """
    # NOT_PROVIDED is the sentinel meaning "no default was supplied",
    # allowing None to be a legitimate default.
    return self.default is not NOT_PROVIDED
def get_default(self):
    """
    Returns the default value for this field.
    """
    if self.has_default():
        # Callable defaults are evaluated fresh on every call.
        if callable(self.default):
            return self.default()
        return self.default
    # No explicit default: use None when the field/backend can represent
    # it, otherwise fall back to the empty string.
    if (not self.empty_strings_allowed or (self.null and
            not connection.features.interprets_empty_strings_as_nulls)):
        return None
    return ""
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
    """Returns choices with a default blank choices included, for use
    as SelectField choices for this field.

    For fields without explicit choices, builds the list from the related
    model's default manager, optionally restricted by limit_choices_to.
    """
    blank_defined = False
    choices = list(self.choices) if self.choices else []
    named_groups = choices and isinstance(choices[0][1], (list, tuple))
    if not named_groups:
        # Detect whether the declared choices already contain a blank
        # entry, to avoid prepending a duplicate.
        for choice, __ in choices:
            if choice in ('', None):
                blank_defined = True
                break
    first_choice = (blank_choice if include_blank and
                    not blank_defined else [])
    if self.choices:
        return first_choice + choices
    rel_model = self.remote_field.model
    limit_choices_to = limit_choices_to or self.get_limit_choices_to()
    if hasattr(self.remote_field, 'get_related_field'):
        # Relation targets a specific field: use its value as the key.
        lst = [(getattr(x, self.remote_field.get_related_field().attname),
                smart_text(x))
               for x in rel_model._default_manager.complex_filter(
                   limit_choices_to)]
    else:
        # Otherwise key choices by the related objects' primary keys.
        lst = [(x._get_pk_val(), smart_text(x))
               for x in rel_model._default_manager.complex_filter(
                   limit_choices_to)]
    return first_choice + lst
def get_choices_default(self):
    """Return get_choices() called with its default arguments."""
    return self.get_choices()
@warn_about_renamed_method(
    'Field', '_get_val_from_obj', 'value_from_object',
    RemovedInDjango20Warning
)
def _get_val_from_obj(self, obj):
    """
    Deprecated alias of value_from_object(). Returns the field's value
    from *obj*, or the field default when obj is None.
    """
    if obj is not None:
        return getattr(obj, self.attname)
    else:
        return self.get_default()
def value_to_string(self, obj):
    """
    Returns a string value of this field from the passed obj.
    This is used by the serialization framework.
    """
    return smart_text(self.value_from_object(obj))
def _get_flatchoices(self):
    """Flattened version of choices tuple."""
    flattened = []
    for key, val in self.choices:
        if isinstance(val, (list, tuple)):
            # Named group: inline its member (value, label) pairs.
            flattened.extend(val)
        else:
            flattened.append((key, val))
    return flattened
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
    """Store form *data* onto *instance* under this field's name."""
    setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
    """
    Returns a django.forms.Field instance for this database Field.

    *form_class* / *choices_form_class* override the form field class
    used for plain and choice-bearing fields respectively; remaining
    kwargs are passed to the form field constructor.
    """
    defaults = {'required': not self.blank,
                'label': capfirst(self.verbose_name),
                'help_text': self.help_text}
    if self.has_default():
        if callable(self.default):
            # Defer evaluation of callable defaults to the form, and
            # render a hidden initial so changes can be detected.
            defaults['initial'] = self.default
            defaults['show_hidden_initial'] = True
        else:
            defaults['initial'] = self.get_default()
    if self.choices:
        # Fields with choices get special treatment.
        include_blank = (self.blank or
                         not (self.has_default() or 'initial' in kwargs))
        defaults['choices'] = self.get_choices(include_blank=include_blank)
        defaults['coerce'] = self.to_python
        if self.null:
            defaults['empty_value'] = None
        if choices_form_class is not None:
            form_class = choices_form_class
        else:
            form_class = forms.TypedChoiceField
        # Many of the subclass-specific formfield arguments (min_value,
        # max_value) don't apply for choice fields, so be sure to only pass
        # the values that TypedChoiceField will understand.
        for k in list(kwargs):
            if k not in ('coerce', 'empty_value', 'choices', 'required',
                         'widget', 'label', 'initial', 'help_text',
                         'error_messages', 'show_hidden_initial'):
                del kwargs[k]
    defaults.update(kwargs)
    if form_class is None:
        form_class = forms.CharField
    return form_class(**defaults)
def value_from_object(self, obj):
    """
    Returns the value of this field in the given model instance.
    """
    return getattr(obj, self.attname)
class AutoField(Field):
    """An integer primary key that the database populates automatically."""
    description = _("Integer")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }

    def __init__(self, *args, **kwargs):
        # Auto fields are never user-supplied, so blank is forced on.
        kwargs['blank'] = True
        super(AutoField, self).__init__(*args, **kwargs)

    def check(self, **kwargs):
        """Run system checks, including the primary_key requirement."""
        errors = super(AutoField, self).check(**kwargs)
        errors.extend(self._check_primary_key())
        return errors

    def _check_primary_key(self):
        """AutoField only makes sense as the primary key (fields.E100)."""
        if not self.primary_key:
            return [
                checks.Error(
                    'AutoFields must set primary_key=True.',
                    hint=None,
                    obj=self,
                    id='fields.E100',
                ),
            ]
        else:
            return []

    def deconstruct(self):
        """Serialize for migrations; blank is implied, primary_key forced."""
        name, path, args, kwargs = super(AutoField, self).deconstruct()
        del kwargs['blank']
        kwargs['primary_key'] = True
        return name, path, args, kwargs

    def get_internal_type(self):
        return "AutoField"

    def to_python(self, value):
        """Coerce *value* to int, raising ValidationError on failure."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def validate(self, value, model_instance):
        # The database assigns the value, so nothing to validate.
        pass

    def get_db_prep_value(self, value, connection, prepared=False):
        """Prepare for the backend, letting it validate explicit pk values."""
        if not prepared:
            value = self.get_prep_value(value)
            value = connection.ops.validate_autopk_value(value)
        return value

    def get_prep_value(self, value):
        """Coerce non-None values to int for query use."""
        value = super(AutoField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)

    def contribute_to_class(self, cls, name, **kwargs):
        # A model may only declare a single AutoField; record it on _meta.
        assert not cls._meta.has_auto_field, \
            "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name, **kwargs)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self

    def formfield(self, **kwargs):
        # Auto fields are never edited through forms.
        return None
class BooleanField(Field):
    """A True/False field. Use NullBooleanField when None is needed."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either True or False."),
    }
    description = _("Boolean (Either True or False)")

    def __init__(self, *args, **kwargs):
        # A boolean can always be "blank" in forms (unchecked == False).
        kwargs['blank'] = True
        super(BooleanField, self).__init__(*args, **kwargs)

    def check(self, **kwargs):
        """Run system checks, including the no-null constraint."""
        errors = super(BooleanField, self).check(**kwargs)
        errors.extend(self._check_null(**kwargs))
        return errors

    def _check_null(self, **kwargs):
        """null=True is invalid here; NullBooleanField exists for that
        (fields.E110)."""
        if getattr(self, 'null', False):
            return [
                checks.Error(
                    'BooleanFields do not accept null values.',
                    hint='Use a NullBooleanField instead.',
                    obj=self,
                    id='fields.E110',
                )
            ]
        else:
            return []

    def deconstruct(self):
        """Serialize for migrations; the forced blank=True is implied."""
        name, path, args, kwargs = super(BooleanField, self).deconstruct()
        del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "BooleanField"

    def to_python(self, value):
        """Coerce common boolean representations to a real bool."""
        if value in (True, False):
            # if value is 1 or 0 than it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        """Coerce non-None values to bool for query use."""
        value = super(BooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """A string field requiring a max_length, stored as VARCHAR-like."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        # Enforce max_length at validation time, not just in the schema.
        self.validators.append(validators.MaxLengthValidator(self.max_length))

    def check(self, **kwargs):
        """Run system checks, including the max_length requirement."""
        errors = super(CharField, self).check(**kwargs)
        errors.extend(self._check_max_length_attribute(**kwargs))
        return errors

    def _check_max_length_attribute(self, **kwargs):
        """max_length must be present (E120) and a positive int (E121)."""
        if self.max_length is None:
            return [
                checks.Error(
                    "CharFields must define a 'max_length' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E120',
                )
            ]
        elif not isinstance(self.max_length, six.integer_types) or self.max_length <= 0:
            return [
                checks.Error(
                    "'max_length' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E121',
                )
            ]
        else:
            return []

    def get_internal_type(self):
        return "CharField"

    def to_python(self, value):
        """Coerce non-string values to text; pass strings/None through."""
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value)

    def get_prep_value(self, value):
        """Convert to the field's text type for query use."""
        value = super(CharField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
    """CharField variant validating a comma-separated list of integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")

    def formfield(self, **kwargs):
        """Return a form field with a list-specific 'invalid' message."""
        field_kwargs = {
            'error_messages': {
                'invalid': _('Enter only digits separated by commas.'),
            }
        }
        field_kwargs.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**field_kwargs)
class DateTimeCheckMixin(object):
    """
    Shared system checks for date/time fields: the mutual exclusivity of
    auto_now, auto_now_add and default, plus a hook for warning about
    fixed default values.
    """

    def check(self, **kwargs):
        errors = super(DateTimeCheckMixin, self).check(**kwargs)
        errors += self._check_mutually_exclusive_options()
        errors += self._check_fix_default_value()
        return errors

    def _check_mutually_exclusive_options(self):
        # auto_now, auto_now_add, and default are mutually exclusive
        # options; more than one enabled at once is an error (fields.E160).
        exclusive_options = (self.auto_now_add, self.auto_now,
                             self.has_default())
        enabled = sum(1 for option in exclusive_options
                      if option not in (None, False))
        if enabled <= 1:
            return []
        return [
            checks.Error(
                "The options auto_now, auto_now_add, and default "
                "are mutually exclusive. Only one of these options "
                "may be present.",
                hint=None,
                obj=self,
                id='fields.E160',
            )
        ]

    def _check_fix_default_value(self):
        # Subclasses override this to warn about fixed default values.
        return []
class DateField(DateTimeCheckMixin, Field):
    """A date (no time component), represented by datetime.date."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid date format. It must be "
                     "in YYYY-MM-DD format."),
        'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
                          "but it is an invalid date."),
    }
    description = _("Date (without time)")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            # Auto-populated fields can't be edited or required in forms.
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(DateField, self).__init__(verbose_name, name, **kwargs)

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.
        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            if not timezone.is_naive(value):
                value = timezone.make_naive(value, timezone.utc)
            value = value.date()
        elif isinstance(value, datetime.date):
            # Nothing to do, as dates don't have tz information
            pass
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        # Warn only when the default is within a day of "now", which
        # suggests the author meant "today" rather than a fixed date.
        offset = datetime.timedelta(days=1)
        lower = (now - offset).date()
        upper = (now + offset).date()
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def deconstruct(self):
        """Serialize for migrations; auto_now/auto_now_add imply
        editable=False and blank=True, so those are dropped."""
        name, path, args, kwargs = super(DateField, self).deconstruct()
        if self.auto_now:
            kwargs['auto_now'] = True
        if self.auto_now_add:
            kwargs['auto_now_add'] = True
        if self.auto_now or self.auto_now_add:
            del kwargs['editable']
            del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DateField"

    def to_python(self, value):
        """Coerce *value* to datetime.date, parsing strings; raise
        ValidationError for unparseable or impossible dates."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            if settings.USE_TZ and timezone.is_aware(value):
                # Convert aware datetimes to the default time zone
                # before casting them to dates (#17742).
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_naive(value, default_timezone)
            return value.date()
        if isinstance(value, datetime.date):
            return value
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed string but an impossible date (e.g. Feb 30).
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        """Substitute today's date when auto_now / auto_now_add applies."""
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)

    def contribute_to_class(self, cls, name, **kwargs):
        """Also install get_next_by_<name>/get_previous_by_<name> helpers
        on the model (only for non-null fields)."""
        super(DateField, self).contribute_to_class(cls, name, **kwargs)
        if not self.null:
            setattr(cls, 'get_next_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=False))

    def get_prep_value(self, value):
        """Convert to datetime.date for query use."""
        value = super(DateField, self).get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_datefield_value(value)

    def value_to_string(self, obj):
        """Serialize as ISO 8601 (YYYY-MM-DD), or '' for None."""
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """A date and time, represented by datetime.datetime."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
        'invalid_date': _("'%(value)s' value has the correct format "
                          "(YYYY-MM-DD) but it is an invalid date."),
        'invalid_datetime': _("'%(value)s' value has the correct format "
                              "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
                              "but it is an invalid date/time."),
    }
    description = _("Date (with time)")

    # __init__ is inherited from DateField

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.
        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            # A datetime default within 10 seconds of "now" suggests the
            # author meant timezone.now rather than a fixed instant.
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.date):
            # Compare date defaults at day resolution around "now".
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            lower = datetime.datetime(lower.year, lower.month, lower.day)
            upper = now + second_offset
            upper = datetime.datetime(upper.year, upper.month, upper.day)
            value = datetime.datetime(value.year, value.month, value.day)
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def get_internal_type(self):
        return "DateTimeField"

    def to_python(self, value):
        """Coerce *value* to datetime.datetime, parsing strings and
        promoting bare dates; warn when a naive datetime arrives while
        time-zone support is on."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            value = datetime.datetime(value.year, value.month, value.day)
            if settings.USE_TZ:
                # For backwards compatibility, interpret naive datetimes in
                # local time. This won't work during DST change, but we can't
                # do much about it, so we let the exceptions percolate up the
                # call stack.
                warnings.warn("DateTimeField %s.%s received a naive datetime "
                              "(%s) while time zone support is active." %
                              (self.model.__name__, self.name, value),
                              RuntimeWarning)
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_aware(value, default_timezone)
            return value
        try:
            parsed = parse_datetime(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed string but an impossible date/time.
            raise exceptions.ValidationError(
                self.error_messages['invalid_datetime'],
                code='invalid_datetime',
                params={'value': value},
            )
        try:
            # Fall back to parsing a bare date (midnight assumed).
            parsed = parse_date(value)
            if parsed is not None:
                return datetime.datetime(parsed.year, parsed.month, parsed.day)
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        """Substitute the current time when auto_now / auto_now_add applies."""
        if self.auto_now or (self.auto_now_add and add):
            value = timezone.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)

    # contribute_to_class is inherited from DateField, it registers
    # get_next_by_FOO and get_prev_by_FOO

    # get_prep_lookup is inherited from DateField

    def get_prep_value(self, value):
        """Convert to an (aware, when USE_TZ) datetime for query use."""
        value = super(DateTimeField, self).get_prep_value(value)
        value = self.to_python(value)
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            # For backwards compatibility, interpret naive datetimes in local
            # time. This won't work during DST change, but we can't do much
            # about it, so we let the exceptions percolate up the call stack.
            try:
                name = '%s.%s' % (self.model.__name__, self.name)
            except AttributeError:
                name = '(unbound)'
            warnings.warn("DateTimeField %s received a naive datetime (%s)"
                          " while time zone support is active." %
                          (name, value),
                          RuntimeWarning)
            default_timezone = timezone.get_default_timezone()
            value = timezone.make_aware(value, default_timezone)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts datetimes into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_datetimefield_value(value)

    def value_to_string(self, obj):
        """Serialize as ISO 8601, or '' for None."""
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """A fixed-precision decimal number, represented by decimal.Decimal."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a decimal number."),
    }
    description = _("Decimal number")

    def __init__(self, verbose_name=None, name=None, max_digits=None,
                 decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        """Run system checks; the combined digits/places check only runs
        when both individual attribute checks pass."""
        errors = super(DecimalField, self).check(**kwargs)
        digits_errors = self._check_decimal_places()
        digits_errors.extend(self._check_max_digits())
        if not digits_errors:
            errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
        else:
            errors.extend(digits_errors)
        return errors

    def _check_decimal_places(self):
        """decimal_places must be present (E130) and a non-negative int
        (E131)."""
        try:
            decimal_places = int(self.decimal_places)
            if decimal_places < 0:
                raise ValueError()
        except TypeError:
            # int(None) -> TypeError: the attribute is missing.
            return [
                checks.Error(
                    "DecimalFields must define a 'decimal_places' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E130',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'decimal_places' must be a non-negative integer.",
                    hint=None,
                    obj=self,
                    id='fields.E131',
                )
            ]
        else:
            return []

    def _check_max_digits(self):
        """max_digits must be present (E132) and a positive int (E133)."""
        try:
            max_digits = int(self.max_digits)
            if max_digits <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'max_digits' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E132',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_digits' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E133',
                )
            ]
        else:
            return []

    def _check_decimal_places_and_max_digits(self, **kwargs):
        """decimal_places may not exceed max_digits (E134)."""
        if int(self.decimal_places) > int(self.max_digits):
            return [
                checks.Error(
                    "'max_digits' must be greater or equal to 'decimal_places'.",
                    hint=None,
                    obj=self,
                    id='fields.E134',
                )
            ]
        return []

    @cached_property
    def validators(self):
        # Add a digits/places validator on top of the standard ones.
        return super(DecimalField, self).validators + [
            validators.DecimalValidator(self.max_digits, self.decimal_places)
        ]

    def deconstruct(self):
        """Serialize for migrations, including max_digits/decimal_places."""
        name, path, args, kwargs = super(DecimalField, self).deconstruct()
        if self.max_digits is not None:
            kwargs['max_digits'] = self.max_digits
        if self.decimal_places is not None:
            kwargs['decimal_places'] = self.decimal_places
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DecimalField"

    def to_python(self, value):
        """Coerce *value* to Decimal, raising ValidationError on failure."""
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def _format(self, value):
        """Return *value* as a string, formatting non-string input."""
        if isinstance(value, six.string_types):
            return value
        else:
            return self.format_number(value)

    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.utils.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import utils
        return utils.format_number(value, self.max_digits, self.decimal_places)

    def get_db_prep_save(self, value, connection):
        """Adapt the Decimal for the backend, honouring digits/places."""
        return connection.ops.adapt_decimalfield_value(self.to_python(value),
                self.max_digits, self.decimal_places)

    def get_prep_value(self, value):
        """Convert to Decimal for query use."""
        value = super(DecimalField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class DurationField(Field):
    """Stores timedelta objects.

    Uses interval on postgres, INTERVAL DAY TO SECOND on Oracle, and bigint
    of microseconds on other databases.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "[DD] [HH:[MM:]]ss[.uuuuuu] format.")
    }
    description = _("Duration")

    def get_internal_type(self):
        return "DurationField"

    def to_python(self, value):
        """Coerce *value* to timedelta, parsing duration strings; raise
        ValidationError on failure."""
        if value is None:
            return value
        if isinstance(value, datetime.timedelta):
            return value
        try:
            parsed = parse_duration(value)
        except ValueError:
            pass
        else:
            if parsed is not None:
                return parsed
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_db_prep_value(self, value, connection, prepared=False):
        """Pass through on backends with native intervals; otherwise store
        the duration as an integer number of microseconds."""
        if connection.features.has_native_duration_field:
            return value
        if value is None:
            return None
        # Discard any fractional microseconds due to floating point arithmetic.
        return int(round(value.total_seconds() * 1000000))

    def get_db_converters(self, connection):
        """Convert the stored microsecond integer back to timedelta on
        backends without native interval support."""
        converters = []
        if not connection.features.has_native_duration_field:
            converters.append(connection.ops.convert_durationfield_value)
        return converters + super(DurationField, self).get_db_converters(connection)

    def value_to_string(self, obj):
        """Serialize as a duration string, or '' for None."""
        val = self.value_from_object(obj)
        return '' if val is None else duration_string(val)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.DurationField,
        }
        defaults.update(kwargs)
        return super(DurationField, self).formfield(**defaults)
class EmailField(CharField):
    """A CharField specialized for validating email addresses."""
    default_validators = [validators.validate_email]
    description = _("Email address")

    def __init__(self, *args, **kwargs):
        # max_length=254 keeps stored addresses compliant with RFCs 3696
        # and 5321.
        kwargs.setdefault('max_length', 254)
        super(EmailField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        """Serialize for migrations."""
        name, path, args, kwargs = super(EmailField, self).deconstruct()
        # We do not exclude max_length if it matches default as we want to
        # change the default in future.
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, this will cause email validation to be
        # performed twice.
        field_kwargs = {
            'form_class': forms.EmailField,
        }
        field_kwargs.update(kwargs)
        return super(EmailField, self).formfield(**field_kwargs)
class FilePathField(Field):
    """A CharField-like field whose choices are filesystem entries under
    *path*, optionally filtered by a *match* regex and recursion."""
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None,
                 recursive=False, allow_files=True, allow_folders=False, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FilePathField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        """Run system checks, including the files/folders requirement."""
        errors = super(FilePathField, self).check(**kwargs)
        errors.extend(self._check_allowing_files_or_folders(**kwargs))
        return errors

    def _check_allowing_files_or_folders(self, **kwargs):
        """At least one of allow_files/allow_folders must be True (E140)."""
        if not self.allow_files and not self.allow_folders:
            return [
                checks.Error(
                    "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                    hint=None,
                    obj=self,
                    id='fields.E140',
                )
            ]
        return []

    def deconstruct(self):
        """Serialize for migrations, omitting arguments left at their
        defaults (including the implicit max_length=100)."""
        name, path, args, kwargs = super(FilePathField, self).deconstruct()
        if self.path != '':
            kwargs['path'] = self.path
        if self.match is not None:
            kwargs['match'] = self.match
        if self.recursive is not False:
            kwargs['recursive'] = self.recursive
        if self.allow_files is not True:
            kwargs['allow_files'] = self.allow_files
        if self.allow_folders is not False:
            kwargs['allow_folders'] = self.allow_folders
        if kwargs.get("max_length") == 100:
            del kwargs["max_length"]
        return name, path, args, kwargs

    def get_prep_value(self, value):
        """Convert to text for query use."""
        value = super(FilePathField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)

    def formfield(self, **kwargs):
        defaults = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
            'allow_files': self.allow_files,
            'allow_folders': self.allow_folders,
        }
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)

    def get_internal_type(self):
        return "FilePathField"
class FloatField(Field):
    """Model field storing a Python float."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a float."),
    }
    description = _("Floating point number")

    def get_internal_type(self):
        # Key used by the backend to pick the column type.
        return "FloatField"

    def get_prep_value(self, value):
        """Convert to float for query parameters; None passes through."""
        prepared = super(FloatField, self).get_prep_value(value)
        return None if prepared is None else float(prepared)

    def to_python(self, value):
        """Coerce ``value`` to float, raising ValidationError on bad input."""
        if value is None:
            return None
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        """Default form field is ``forms.FloatField``."""
        kwargs.setdefault('form_class', forms.FloatField)
        return super(FloatField, self).formfield(**kwargs)
class IntegerField(Field):
    """Model field storing a Python int, range-validated per database backend."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    description = _("Integer")
    def check(self, **kwargs):
        errors = super(IntegerField, self).check(**kwargs)
        errors.extend(self._check_max_length_warning())
        return errors
    def _check_max_length_warning(self):
        # System check W122: max_length has no effect on integer columns.
        if self.max_length is not None:
            return [
                checks.Warning(
                    "'max_length' is ignored when used with IntegerField",
                    hint="Remove 'max_length' from field",
                    obj=self,
                    id='fields.W122',
                )
            ]
        return []
    @cached_property
    def validators(self):
        # These validators can't be added at field initialization time since
        # they're based on values retrieved from `connection`.
        range_validators = []
        internal_type = self.get_internal_type()
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        if min_value is not None:
            range_validators.append(validators.MinValueValidator(min_value))
        if max_value is not None:
            range_validators.append(validators.MaxValueValidator(max_value))
        return super(IntegerField, self).validators + range_validators
    def get_prep_value(self, value):
        """Convert to int for query parameters; None passes through."""
        value = super(IntegerField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)
    def get_prep_lookup(self, lookup_type, value):
        # A float bound on 'gte'/'lt' can be rounded up without changing the
        # integer result set (n >= 2.5 <=> n >= 3, and n < 2.5 <=> n < 3).
        if ((lookup_type == 'gte' or lookup_type == 'lt')
                and isinstance(value, float)):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)
    def get_internal_type(self):
        return "IntegerField"
    def to_python(self, value):
        """Coerce ``value`` to int, raising ValidationError on bad input."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """64-bit signed integer field."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    # Largest value representable in a signed 64-bit column (2**63 - 1).
    MAX_BIGINT = 9223372036854775807

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        """Bound the form field to the signed 64-bit range by default."""
        kwargs.setdefault('min_value', -BigIntegerField.MAX_BIGINT - 1)
        kwargs.setdefault('max_value', BigIntegerField.MAX_BIGINT)
        return super(BigIntegerField, self).formfield(**kwargs)
class IPAddressField(Field):
    """Legacy IPv4-only field; removed except for historical migrations (E900)."""
    empty_strings_allowed = False
    description = _("IPv4 address")
    system_check_removed_details = {
        'msg': (
            'IPAddressField has been removed except for support in '
            'historical migrations.'
        ),
        'hint': 'Use GenericIPAddressField instead.',
        'id': 'fields.E900',
    }
    def __init__(self, *args, **kwargs):
        # Dotted-quad IPv4 text is at most 15 characters.
        kwargs['max_length'] = 15
        super(IPAddressField, self).__init__(*args, **kwargs)
    def deconstruct(self):
        # max_length is forced in __init__, so it never appears in migrations.
        name, path, args, kwargs = super(IPAddressField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs
    def get_prep_value(self, value):
        """Convert to text for query parameters; None passes through."""
        value = super(IPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)
    def get_internal_type(self):
        return "IPAddressField"
class GenericIPAddressField(Field):
    """Field accepting IPv4 and/or IPv6 addresses, controlled by ``protocol``."""
    empty_strings_allowed = False
    description = _("IP address")
    # NOTE(review): this class-level dict is written to per-instance in
    # __init__ below (self.default_error_messages['invalid'] = ...), which
    # mutates the dict shared by the whole class -- confirm this is intended.
    default_error_messages = {}
    def __init__(self, verbose_name=None, name=None, protocol='both',
                 unpack_ipv4=False, *args, **kwargs):
        self.unpack_ipv4 = unpack_ipv4
        self.protocol = protocol
        # Validators and the 'invalid' message depend on the protocol choice.
        self.default_validators, invalid_error_message = \
            validators.ip_address_validators(protocol, unpack_ipv4)
        self.default_error_messages['invalid'] = invalid_error_message
        # Longest textual IPv6 form (including IPv4-mapped) is 39 characters.
        kwargs['max_length'] = 39
        super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
                                                    **kwargs)
    def check(self, **kwargs):
        errors = super(GenericIPAddressField, self).check(**kwargs)
        errors.extend(self._check_blank_and_null_values(**kwargs))
        return errors
    def _check_blank_and_null_values(self, **kwargs):
        # System check E150: blank values are stored as NULL, so blank=True
        # requires null=True.
        if not getattr(self, 'null', False) and getattr(self, 'blank', False):
            return [
                checks.Error(
                    ('GenericIPAddressFields cannot have blank=True if null=False, '
                     'as blank values are stored as nulls.'),
                    hint=None,
                    obj=self,
                    id='fields.E150',
                )
            ]
        return []
    def deconstruct(self):
        """Omit default constructor arguments from the migration serialization."""
        name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
        if self.unpack_ipv4 is not False:
            kwargs['unpack_ipv4'] = self.unpack_ipv4
        if self.protocol != "both":
            kwargs['protocol'] = self.protocol
        if kwargs.get("max_length") == 39:
            del kwargs['max_length']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "GenericIPAddressField"
    def to_python(self, value):
        """Normalize input text; IPv6 (contains ':') is cleaned, IPv4 kept as-is."""
        if value is None:
            return None
        if not isinstance(value, six.string_types):
            value = force_text(value)
        value = value.strip()
        if ':' in value:
            return clean_ipv6_address(value,
                self.unpack_ipv4, self.error_messages['invalid'])
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Let the backend adapt the prepared value to its column type."""
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_ipaddressfield_value(value)
    def get_prep_value(self, value):
        """Convert to text for query parameters, normalizing IPv6 when valid."""
        value = super(GenericIPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        if value and ':' in value:
            try:
                return clean_ipv6_address(value, self.unpack_ipv4)
            except exceptions.ValidationError:
                # Not valid IPv6; fall through and send the raw text.
                pass
        return six.text_type(value)
    def formfield(self, **kwargs):
        defaults = {
            'protocol': self.protocol,
            'form_class': forms.GenericIPAddressField,
        }
        defaults.update(kwargs)
        return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """Boolean field that also admits NULL; null=True and blank=True are forced."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")
    def __init__(self, *args, **kwargs):
        # null/blank are unconditionally True; deconstruct() removes them again.
        kwargs['null'] = True
        kwargs['blank'] = True
        super(NullBooleanField, self).__init__(*args, **kwargs)
    def deconstruct(self):
        name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
        del kwargs['null']
        del kwargs['blank']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "NullBooleanField"
    def to_python(self, value):
        """Map None/bool and common string spellings; else raise 'invalid'."""
        if value is None:
            return None
        # Tuple membership uses ==, so the integers 1 and 0 also match here.
        if value in (True, False):
            return bool(value)
        if value in ('None',):
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type,
                                                             value)
    def get_prep_value(self, value):
        """Convert to bool for query parameters; None passes through."""
        value = super(NullBooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)
    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text}
        defaults.update(kwargs)
        return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
    """IntegerField restricted (at the form level) to values >= 0."""
    description = _("Positive integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        # Form-level floor of zero; database-range validators come from
        # IntegerField.validators.
        kwargs.setdefault('min_value', 0)
        return super(PositiveIntegerField, self).formfield(**kwargs)
class PositiveSmallIntegerField(IntegerField):
    """Small-integer variant restricted (at the form level) to values >= 0."""
    description = _("Positive small integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        # Form-level floor of zero; database-range validators come from
        # IntegerField.validators.
        kwargs.setdefault('min_value', 0)
        return super(PositiveSmallIntegerField, self).formfield(**kwargs)
class SlugField(CharField):
    """CharField for URL slugs; indexed by default and validated by validate_slug."""
    default_validators = [validators.validate_slug]
    description = _("Slug (up to %(max_length)s)")
    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 50)
        # Set db_index=True unless it's been set manually.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        # allow_unicode swaps in the unicode-aware slug validator.
        self.allow_unicode = kwargs.pop('allow_unicode', False)
        if self.allow_unicode:
            self.default_validators = [validators.validate_unicode_slug]
        super(SlugField, self).__init__(*args, **kwargs)
    def deconstruct(self):
        """Omit defaults (max_length=50, db_index=True, allow_unicode=False)."""
        name, path, args, kwargs = super(SlugField, self).deconstruct()
        if kwargs.get("max_length") == 50:
            del kwargs['max_length']
        if self.db_index is False:
            kwargs['db_index'] = False
        else:
            del kwargs['db_index']
        if self.allow_unicode is not False:
            kwargs['allow_unicode'] = self.allow_unicode
        return name, path, args, kwargs
    def get_internal_type(self):
        return "SlugField"
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode}
        defaults.update(kwargs)
        return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
    """IntegerField mapped to the backend's small-integer column type."""
    description = _("Small integer")
    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Large text field with no database-enforced length limit."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def to_python(self, value):
        """Return strings (and None) unchanged; coerce everything else to text."""
        if value is None or isinstance(value, six.string_types):
            return value
        return smart_text(value)

    def get_prep_value(self, value):
        """Prepare the value for a query parameter as text."""
        prepared = super(TextField, self).get_prep_value(value)
        return self.to_python(prepared)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
        defaults.update(kwargs)
        return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
    """Stores a datetime.time; supports auto_now / auto_now_add stamping."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "HH:MM[:ss[.uuuuuu]] format."),
        'invalid_time': _("'%(value)s' value has the correct format "
                          "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
    }
    description = _("Time")
    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        # Auto-set fields cannot be edited and may be blank in forms.
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(TimeField, self).__init__(verbose_name, name, **kwargs)
    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        time or datetime value is probably wrong; it's only being evaluated on
        server start-up.

        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            # Warn only when the default falls within +/-10s of startup time,
            # i.e. it looks like it was computed at module import.
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.time):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            # Lift the time onto today's date so it can be compared with now.
            value = datetime.datetime.combine(now.date(), value)
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc).time()
        else:
            # No explicit time / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []
    def deconstruct(self):
        """Serialize auto_now/auto_now_add; drop the blank/editable they imply."""
        name, path, args, kwargs = super(TimeField, self).deconstruct()
        if self.auto_now is not False:
            kwargs["auto_now"] = self.auto_now
        if self.auto_now_add is not False:
            kwargs["auto_now_add"] = self.auto_now_add
        if self.auto_now or self.auto_now_add:
            del kwargs['blank']
            del kwargs['editable']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "TimeField"
    def to_python(self, value):
        """Coerce to datetime.time, parsing strings; raise ValidationError otherwise."""
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        try:
            parsed = parse_time(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Right shape but an impossible time (e.g. hour 25).
            raise exceptions.ValidationError(
                self.error_messages['invalid_time'],
                code='invalid_time',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def pre_save(self, model_instance, add):
        """Stamp the current time for auto_now (always) / auto_now_add (insert only)."""
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)
    def get_prep_value(self, value):
        """Prepare a query parameter as a datetime.time."""
        value = super(TimeField, self).get_prep_value(value)
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_timefield_value(value)
    def value_to_string(self, obj):
        """Serialize as ISO 8601 text; empty string for None."""
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """CharField validating URLs, with max_length defaulting to 200."""
    default_validators = [validators.URLValidator()]
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, **kwargs):
        kwargs.setdefault('max_length', 200)
        super(URLField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        """Drop max_length from the migration serialization when it is the default."""
        name, path, args, kwargs = super(URLField, self).deconstruct()
        if kwargs.get("max_length") == 200:
            del kwargs['max_length']
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, this will cause URL validation to be performed
        # twice.
        kwargs.setdefault('form_class', forms.URLField)
        return super(URLField, self).formfield(**kwargs)
class BinaryField(Field):
    """Raw binary field; never editable, serialized to/from base64 text."""
    description = _("Raw binary data")
    empty_values = [None, b'']
    def __init__(self, *args, **kwargs):
        # Binary blobs are never editable through forms/admin.
        kwargs['editable'] = False
        super(BinaryField, self).__init__(*args, **kwargs)
        if self.max_length is not None:
            self.validators.append(validators.MaxLengthValidator(self.max_length))
    def deconstruct(self):
        # editable is forced in __init__, so it never appears in migrations.
        name, path, args, kwargs = super(BinaryField, self).deconstruct()
        del kwargs['editable']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "BinaryField"
    def get_default(self):
        """Like Field.get_default, but map the '' fallback to b''."""
        if self.has_default() and not callable(self.default):
            return self.default
        default = super(BinaryField, self).get_default()
        if default == '':
            return b''
        return default
    def get_db_prep_value(self, value, connection, prepared=False):
        """Wrap the value in the database driver's Binary type."""
        value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
        if value is not None:
            return connection.Database.Binary(value)
        return value
    def value_to_string(self, obj):
        """Binary data is serialized as base64"""
        return b64encode(force_bytes(self.value_from_object(obj))).decode('ascii')
    def to_python(self, value):
        # If it's a string, it should be base64-encoded data
        if isinstance(value, six.text_type):
            return six.memoryview(b64decode(force_bytes(value)))
        return value
class UUIDField(Field):
    """Stores uuid.UUID values; native column where supported, else 32-char hex."""
    default_error_messages = {
        'invalid': _("'%(value)s' is not a valid UUID."),
    }
    description = 'Universally unique identifier'
    empty_strings_allowed = False
    def __init__(self, verbose_name=None, **kwargs):
        # 32 hex digits (no dashes) is how non-native backends store the value.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(verbose_name, **kwargs)
    def deconstruct(self):
        # max_length is forced in __init__, so drop it from migrations.
        name, path, args, kwargs = super(UUIDField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "UUIDField"
    def get_db_prep_value(self, value, connection, prepared=False):
        """Pass UUIDs natively when the backend supports it, else as hex text.

        NOTE(review): only AttributeError (non-string input) is converted to
        TypeError here; a malformed *string* makes uuid.UUID raise ValueError,
        which propagates unchanged -- confirm that is intended.
        """
        if value is None:
            return None
        if not isinstance(value, uuid.UUID):
            try:
                value = uuid.UUID(value)
            except AttributeError:
                raise TypeError(self.error_messages['invalid'] % {'value': value})
        if connection.features.has_native_uuid_field:
            return value
        return value.hex
    def to_python(self, value):
        """Coerce non-empty values to uuid.UUID, raising ValidationError on failure."""
        if value and not isinstance(value, uuid.UUID):
            try:
                return uuid.UUID(value)
            except ValueError:
                raise exceptions.ValidationError(
                    self.error_messages['invalid'],
                    code='invalid',
                    params={'value': value},
                )
        return value
    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.UUIDField,
        }
        defaults.update(kwargs)
        return super(UUIDField, self).formfield(**defaults)
| gpl-3.0 |
Lh4cKg/sl4a | python/src/Lib/ctypes/test/test_find.py | 51 | 2472 | import unittest
import sys
from ctypes import *
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
# Resolve platform-specific OpenGL library names up front; the test class
# below only defines test methods for libraries that were actually found.
if sys.platform == "win32":
    lib_gl = find_library("OpenGL32")
    lib_glu = find_library("Glu32")
    lib_gle = None
elif sys.platform == "darwin":
    # On OS X a single framework provides both GL and GLU; there is no gle.
    lib_gl = lib_glu = find_library("OpenGL")
    lib_gle = None
else:
    lib_gl = find_library("GL")
    lib_glu = find_library("GLU")
    lib_gle = find_library("gle")
## print, for debugging
if is_resource_enabled("printing"):
    if lib_gl or lib_glu or lib_gle:
        print "OpenGL libraries:"
        for item in (("GL", lib_gl),
                     ("GLU", lib_glu),
                     ("gle", lib_gle)):
            print "\t", item
# On some systems, loading the OpenGL libraries needs the RTLD_GLOBAL mode.
class Test_OpenGL_libs(unittest.TestCase):
    """Checks that OpenGL libraries located by find_library() can be loaded."""
    def setUp(self):
        self.gl = self.glu = self.gle = None
        if lib_gl:
            # RTLD_GLOBAL exports GL's symbols so dependent libraries (GLU)
            # can resolve them (see the module comment above this class).
            self.gl = CDLL(lib_gl, mode=RTLD_GLOBAL)
        if lib_glu:
            self.glu = CDLL(lib_glu, RTLD_GLOBAL)
        if lib_gle:
            try:
                self.gle = CDLL(lib_gle)
            except OSError:
                # find_library may report a name that still fails to load.
                pass
    # Each test exists only when the library name was found at import time;
    # attribute access raises AttributeError if the export is missing.
    if lib_gl:
        def test_gl(self):
            if self.gl:
                self.gl.glClearIndex
    if lib_glu:
        def test_glu(self):
            if self.glu:
                self.glu.gluBeginCurve
    if lib_gle:
        def test_gle(self):
            if self.gle:
                self.gle.gleGetJoinStyle
##if os.name == "posix" and sys.platform != "darwin":
## # On platforms where the default shared library suffix is '.so',
## # at least some libraries can be loaded as attributes of the cdll
## # object, since ctypes now tries loading the lib again
## # with '.so' appended of the first try fails.
## #
## # Won't work for libc, unfortunately. OTOH, it isn't
## # needed for libc since this is already mapped into the current
## # process (?)
## #
## # On MAC OSX, it won't work either, because dlopen() needs a full path,
## # and the default suffix is either none or '.dylib'.
## class LoadLibs(unittest.TestCase):
## def test_libm(self):
## import math
## libm = cdll.libm
## sqrt = libm.sqrt
## sqrt.argtypes = (c_double,)
## sqrt.restype = c_double
## self.failUnlessEqual(sqrt(2), math.sqrt(2))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
phad/certificate-transparency | python/utilities/logobserver/logobserver.py | 7 | 2496 | #!/usr/bin/env python
import gflags
from google.protobuf import text_format
import logging
import os
import sys
import requests
from ct.client.db import sqlite_connection as sqlitecon
from ct.client import prober
from ct.client.db import sqlite_log_db
from ct.client.db import sqlite_temp_db
from ct.client.db import sqlite_cert_db
from ct.proto import client_pb2
FLAGS = gflags.FLAGS
# Command-line flags: log configuration/verbosity and the on-disk locations
# of the CT databases and monitor state.
gflags.DEFINE_string("ctlog_config", "ct/config/logs.config",
                     "Configuration file for log servers to monitor")
gflags.DEFINE_string("log_level", "WARNING", "logging level")
gflags.DEFINE_string("ct_sqlite_db", "/tmp/ct", "Location of the CT database")
gflags.DEFINE_string("ct_sqlite_temp_dir", "/tmp/ct_tmp", "Directory for "
                     "temporary CT data.")
gflags.DEFINE_string("ct_sqlite_cert_db", "/tmp/ct_cert", "Location of "
                     "certificate database.")
gflags.DEFINE_string("monitor_state_dir", "/tmp/ct_monitor",
                     "Filename prefix for monitor state. State for a given log "
                     "will be stored in a monitor_state_dir/log_id file")
def create_directory(directory):
    """Ensure *directory* exists, creating it (and parents) when absent."""
    if os.path.exists(directory):
        return
    logging.info("Creating directory: %s" % directory)
    os.makedirs(directory)
if __name__ == '__main__':
    # Parse gflags (FLAGS() returns the remaining argv) and set up logging.
    sys.argv = FLAGS(sys.argv)
    logging.basicConfig(level=FLAGS.log_level)
    create_directory(FLAGS.ct_sqlite_temp_dir)
    create_directory(FLAGS.monitor_state_dir)
    # Open the SQLite-backed log, temp and certificate databases. (These
    # assignments shadow the imported module names; each module is only
    # referenced once, here.)
    sqlite_log_db = sqlite_log_db.SQLiteLogDB(
        sqlitecon.SQLiteConnectionManager(FLAGS.ct_sqlite_db))
    sqlite_temp_db_factory = sqlite_temp_db.SQLiteTempDBFactory(
        sqlitecon.SQLiteConnectionManager(FLAGS.ct_sqlite_temp_dir + "/meta"),
        FLAGS.ct_sqlite_temp_dir)
    sqlite_cert_db = sqlite_cert_db.SQLiteCertDB(
        sqlitecon.SQLiteConnectionManager(FLAGS.ct_sqlite_cert_db))
    # Read the text-format protobuf listing the CT logs to monitor.
    ctlogs = client_pb2.CtLogs()
    with open(FLAGS.ctlog_config, "r") as config:
        log_config = config.read()
    text_format.Merge(log_config, ctlogs)
    ct_server_list = []
    for log in ctlogs.ctlog:
        sqlite_log_db.update_log(log)
        ct_server_list.append(log.log_server)
    # Probe all configured logs from a background thread.
    prober_thread = prober.ProberThread(ctlogs, sqlite_log_db,
                                        sqlite_cert_db,
                                        sqlite_temp_db_factory,
                                        FLAGS.monitor_state_dir)
    prober_thread.start()
| apache-2.0 |
mlperf/inference_results_v0.5 | closed/Google/code/gnmt/tpu-gnmt/home/kbuilder/mlperf-inference/google3/third_party/mlperf/inference/gnmt/nmt/tpu/model.py | 1 | 18339 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tpu import beam_search_decoder
from tpu import decoder
from tpu import model_helper
from tpu.utils import misc_utils as utils
# Fail fast on unsupported TensorFlow versions; only BaseModel is exported.
utils.check_tensorflow_version()
__all__ = ["BaseModel"]
def dropout(shape, dtype, keep_ratio):
  """Inverted-dropout mask: random 0/1 tensor scaled by 1/keep_ratio."""
  # floor(uniform[0,1) + keep_ratio) is 1 with probability keep_ratio.
  keep_mask = tf.math.floor(tf.random.uniform(shape, dtype=dtype) + keep_ratio)
  return keep_mask / keep_ratio
class Attention(tf.contrib.rnn.RNNCell):
  """RNN cell wrapper exposing both the cell output and its attention state.

  The wrapped cell's output is concatenated with the ``attention`` entry of
  its new state, widening ``output_size`` accordingly.
  """

  def __init__(self, cell):
    super(Attention, self).__init__()
    self._cell = cell

  @property
  def wrapped_cell(self):
    # The underlying attention-wrapped cell.
    return self._cell

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    # Cell output plus the attention vector appended in __call__.
    return self._cell.output_size + self._cell.state_size.attention

  def __call__(self, inputs, state, scope=None):
    cell_output, next_state = self._cell(inputs, state)
    return tf.concat([cell_output, next_state.attention], -1), next_state
class BaseModel(object):
"""Sequence-to-sequence base class.
"""
  def __init__(self, hparams, mode, features, reuse=tf.AUTO_REUSE):
    """Create the model.

    Args:
      hparams: Hyperparameter configurations.
      mode: TRAIN | EVAL | INFER
      features: a dict of input features.
      reuse: whether to reuse variables.
    """
    if reuse is not None:
      self.reuse = reuse
    else:
      # reuse=None: infer variable reuse from the mode.
      self.reuse = mode == tf.contrib.learn.ModeKeys.INFER
    # Set params
    self._set_params_initializer(hparams, mode, features)
    # Train graph
    self.init_embeddings(hparams)
    source = features["source"]
    def f(seq_len):
      # Graph builder for a fixed sequence-length bucket.
      return lambda: self.build_graph(hparams, source, seq_len)
    def c(seq_len):
      # True when every source in the batch is shorter than seq_len.
      return tf.reduce_max(
          features["source_sequence_length"]) < tf.constant(seq_len)
    # Bucket the batch by max source length (32/64/96/128) and build the
    # graph for the smallest bucket that fits.
    res = [
        None,
        tf.cond(
            c(64), lambda: tf.cond(c(32), f(32), f(64)),
            lambda: tf.cond(c(96), f(96), f(128)))
    ]
    self._set_train_or_infer(res, hparams)
  def _emb_lookup(self, weight, index):
    # Gather rows of `weight` for a 2-D id tensor, keeping its leading two
    # dimensions: [d0, d1] ids -> [d0, d1, emb_dim], cast to self.dtype.
    return tf.cast(
        tf.reshape(
            tf.gather(weight, tf.reshape(index, [-1])),
            [index.shape[0], index.shape[1], -1]), self.dtype)
  def _set_params_initializer(self, hparams, mode, features):
    """Copy hparams onto self and install the global variable initializer."""
    self.mode = mode
    self.src_vocab_size = hparams.src_vocab_size
    self.tgt_vocab_size = hparams.tgt_vocab_size
    self.features = features
    self.dtype = tf.as_dtype(hparams.activation_dtype)
    self.single_cell_fn = None
    # Set num units
    self.num_units = hparams.num_units
    self.eos_id = hparams.tgt_eos_id
    self.label_smoothing = hparams.label_smoothing
    # Set num layers
    self.num_encoder_layers = hparams.num_encoder_layers
    self.num_decoder_layers = hparams.num_decoder_layers
    assert self.num_encoder_layers
    assert self.num_decoder_layers
    # Batch size
    self.batch_size = hparams.infer_batch_size
    # Global step
    # Use get_global_step instead of user-defied global steps. Otherwise the
    # num_train_steps in TPUEstimator.train has no effect (will train forever).
    # TPUestimator only check if tf.train.get_global_step() < num_train_steps
    self.global_step = None
    # Initializer
    self.random_seed = hparams.random_seed
    initializer = model_helper.get_initializer(
        hparams.init_op, self.random_seed, hparams.init_weight)
    tf.get_variable_scope().set_initializer(initializer)
  def _set_train_or_infer(self, res, hparams):
    """Set up training.

    Args:
      res: result pair from build_graph; res[1] holds predicted ids (used in
        INFER mode), res[0] the loss (used in TRAIN mode).
      hparams: hyperparameters for learning rate / optimizer settings.
    """
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      self.predicted_ids = res[1]
    params = tf.trainable_variables()
    # Gradients and SGD update operation for training the model.
    # Arrange for the embedding vars to appear at the beginning.
    if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
      loss = res[0]
      self.learning_rate = tf.constant(hparams.learning_rate)
      # warm-up
      self.learning_rate = self._get_learning_rate_warmup(hparams)
      # decay
      self.learning_rate = self._get_learning_rate_decay(hparams)
      # Optimizer
      if hparams.optimizer == "sgd":
        opt = tf.train.GradientDescentOptimizer(self.learning_rate)
      elif hparams.optimizer == "adam":
        opt = tf.train.AdamOptimizer(self.learning_rate)
      else:
        raise ValueError("Unknown optimizer type %s" % hparams.optimizer)
      # Aggregate gradients across TPU shards.
      opt = tf.contrib.tpu.CrossShardOptimizer(opt)
      # Gradients
      gradients = tf.gradients(loss, params, colocate_gradients_with_ops=True)
      clipped_grads, grad_norm = model_helper.gradient_clip(
          gradients, max_gradient_norm=hparams.max_gradient_norm)
      self.grad_norm = grad_norm
      self.update = opt.apply_gradients(
          zip(clipped_grads, params), global_step=self.global_step)
    # Print trainable variables
    utils.print_out("# Trainable variables")
    utils.print_out("Format: <name>, <shape>, <(soft) device placement>")
    for param in params:
      utils.print_out("  %s, %s, %s" % (param.name, str(param.get_shape()),
                                        param.op.device))
  def _get_learning_rate_warmup(self, hparams):
    """Get learning rate warmup.

    Returns a tensor: the warmed-up rate before `warmup_steps`, the plain
    learning rate afterwards.
    """
    warmup_steps = hparams.warmup_steps
    warmup_scheme = hparams.warmup_scheme
    utils.print_out("  learning_rate=%g, warmup_steps=%d, warmup_scheme=%s" %
                    (hparams.learning_rate, warmup_steps, warmup_scheme))
    # Apply inverse decay if global steps less than warmup steps.
    # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3)
    # When step < warmup_steps,
    #   learing_rate *= warmup_factor ** (warmup_steps - step)
    if warmup_scheme == "t2t":
      # 0.01^(1/warmup_steps): we start with a lr, 100 times smaller
      warmup_factor = tf.exp(tf.log(0.01) / warmup_steps)
      inv_decay = warmup_factor**(tf.to_float(warmup_steps - self.global_step))
    else:
      raise ValueError("Unknown warmup scheme %s" % warmup_scheme)
    return tf.cond(
        self.global_step < hparams.warmup_steps,
        lambda: inv_decay * self.learning_rate,
        lambda: self.learning_rate,
        name="learning_rate_warump_cond")
  def _get_learning_rate_decay(self, hparams):
    """Get learning rate decay.

    Before `decay_start` the rate is unchanged; afterwards it decays by
    `decay_factor` every `decay_interval` steps (staircase), floored at
    learning_rate * decay_factor ** decay_steps.
    """
    return tf.cond(
        self.global_step < hparams.decay_start,
        lambda: self.learning_rate,
        lambda: tf.maximum(  # pylint: disable=g-long-lambda
            tf.train.exponential_decay(
                self.learning_rate,
                self.global_step - hparams.decay_start,
                hparams.decay_interval,
                hparams.decay_factor,
                staircase=True),
            self.learning_rate * tf.pow(hparams.decay_factor, hparams.
                                        decay_steps)),
        name="learning_rate_decay_cond")
  def init_embeddings(self, hparams):
    """Create encoder/decoder embedding tables (optionally partitioned)."""
    self.embedding_encoder, self.embedding_decoder = (
        model_helper.create_emb_for_encoder_and_decoder(
            src_vocab_size=self.src_vocab_size,
            tgt_vocab_size=self.tgt_vocab_size,
            dtype=self.dtype,
            src_embed_size=self.num_units,
            tgt_embed_size=self.num_units,
            num_enc_partitions=hparams.num_enc_emb_partitions,
            num_dec_partitions=hparams.num_dec_emb_partitions,
            src_vocab_file=hparams.src_vocab_file,
            tgt_vocab_file=hparams.tgt_vocab_file,
            src_embed_file=hparams.src_embed_file,
            tgt_embed_file=hparams.tgt_embed_file,
        ))
  def build_graph(self, hparams, source, max_seq_len):
    """Subclass must implement this method.

    Creates a sequence-to-sequence model with dynamic RNN decoder API.
    Args:
      hparams: Hyperparameter configurations.
      source: The input source.
      max_seq_len: The max sequence length

    Returns:
      A tuple of the form (logits, predicted_ids) for infererence and
      (loss, None) for training.
      where:
        logits: float32 Tensor [batch_size x num_decoder_symbols]
        loss: float32 scalar
        predicted_ids: predicted ids from beam search.
    """
    utils.print_out("# Creating %s graph ..." % self.mode)
    # Truncate the batch to this bucket's max_seq_len.
    source = tf.reshape(
        tf.slice(source, [0, 0], [self.batch_size, max_seq_len]),
        [self.batch_size, max_seq_len])
    with tf.variable_scope(
        "dynamic_seq2seq", dtype=self.dtype, reuse=self.reuse):
      if hparams.activation_dtype == "bfloat16":
        tf.get_variable_scope().set_dtype(tf.bfloat16)
      # Encoder
      encoder_outputs, encoder_states = self._build_encoder(hparams, source)
      ## Decoder
      with tf.variable_scope("decoder", reuse=self.reuse):
        with tf.variable_scope("output_projection", reuse=self.reuse):
          # The projection kernel is allocated padded up to a multiple of
          # 128 columns (presumably for TPU lane alignment -- confirm) and
          # then sliced back down to the true vocabulary size.
          self.output_layer = tf.slice(
              tf.get_variable(
                  "kernel",
                  [self.num_units, 128 * (self.tgt_vocab_size // 128 + 1)]),
              [0, 0], [self.num_units, self.tgt_vocab_size])
      # Element [1] of the decoder result is returned (see class docstring).
      return self._build_decoder(encoder_outputs, encoder_states, hparams)[1]
def _compute_loss(self, theta, inputs, factored_batch_size=None):
"""Final projection layer and computes the loss."""
logits = tf.cast(
tf.matmul(tf.cast(inputs[0], theta.dtype), theta), tf.float32)
if factored_batch_size is not None:
logits.set_shape([factored_batch_size, self.tgt_vocab_size])
target = tf.cast(tf.reshape(inputs[1], [-1]), tf.int32)
crossent = tf.losses.softmax_cross_entropy(
tf.one_hot(target, self.tgt_vocab_size, dtype=logits.dtype),
logits,
label_smoothing=self.label_smoothing,
reduction=tf.losses.Reduction.NONE)
crossent = tf.where(target == self.eos_id, tf.zeros_like(crossent),
crossent)
return tf.reshape(crossent, [-1]), []
def _build_decoder(self, encoder_outputs, encoder_states, hparams):
  """Build and run a RNN decoder with a final projection layer.

  Args:
    encoder_outputs: The outputs of encoder for every time step.
    encoder_states: The encoder states.
    hparams: The Hyperparameters configurations.

  Returns:
    For inference, A tuple of final logits and final decoder state:
      logits: size [time, batch_size, vocab_size]
    For training, returns the final loss
  """
  with tf.variable_scope("decoder", reuse=self.reuse) as decoder_scope:
    # Attention memory: time-major encoder output transposed to batch-major.
    memory = tf.transpose(encoder_outputs, [1, 0, 2])
    source_sequence_length = self.features["source_sequence_length"]
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      # Replicate memory and lengths once per beam so each hypothesis
      # behaves like an independent batch element.
      memory = tf.contrib.seq2seq.tile_batch(
          memory, multiplier=hparams.beam_width)
      source_sequence_length = tf.contrib.seq2seq.tile_batch(
          source_sequence_length, multiplier=hparams.beam_width)
    # Out-of-range attention scores are masked with -inf in memory's dtype.
    score_mask_value = tf.convert_to_tensor(
        tf.as_dtype(memory.dtype).as_numpy_dtype(-np.inf))
    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        hparams.num_units,
        memory,
        memory_sequence_length=source_sequence_length,
        score_mask_value=score_mask_value,
        normalize=True,
        dtype=memory.dtype)
    cell = tf.contrib.rnn.BasicLSTMCell(hparams.num_units, forget_bias=1.0)
    atten_cell = Attention(
        tf.contrib.seq2seq.AttentionWrapper(
            cell,
            attention_mechanism,
            attention_layer_size=None,
            alignment_history=False,
            output_attention=False,
            name="attention"))
    # Three uni-directional layers stacked above the attention cell; their
    # variables live in per-layer "uni_%d" scopes.
    cells = []
    for i in range(3):
      with tf.variable_scope("uni_%d" % i, reuse=self.reuse):
        cells.append(
            tf.contrib.rnn.BasicLSTMCell(hparams.num_units, forget_bias=1.0))
    ## Train
    if self.mode != tf.contrib.learn.ModeKeys.INFER:
      target_input = self.features["target_input"]
      batch_size, _ = target_input.shape
      target_input = tf.transpose(target_input)  # batch-major -> time-major
      emb = self._emb_lookup(self.embedding_decoder, target_input)
      seq_len = self.features["target_sequence_length"]
      # The attention cell emits [rnn_output ; attention_context]
      # concatenated on the last axis; tf.split separates them below.
      out, _ = tf.contrib.recurrent.functional_rnn(
          atten_cell,
          emb * dropout(emb.shape, emb.dtype, 1.0 - hparams.dropout),
          dtype=self.dtype,
          sequence_length=seq_len,
          scope=decoder_scope,
          time_major=True,
          use_tpu=True)
      out, attention = tf.split(out, 2, -1)
      for i in range(3):
        with tf.variable_scope("uni_%d" % i, reuse=self.reuse) as s:
          inp = out
          # The attention context is fed to every stacked layer.
          out = tf.concat([out, attention], -1)
          out, _ = tf.contrib.recurrent.functional_rnn(
              cells[i],
              out * dropout(out.shape, emb.dtype, 1.0 - hparams.dropout),
              dtype=self.dtype,
              sequence_length=seq_len,
              scope=s,
              time_major=True,
              use_tpu=True)
          if i > 0:
            # Residual connection, skipped for the first stacked layer.
            out += inp
      return tf.reduce_sum(
          self._compute_loss(self.output_layer, [
              tf.reshape(out, [-1, self.num_units]),
              tf.transpose(self.features["target_output"])
          ])[0]), None
    ## Inference
    else:
      assert hparams.infer_mode == "beam_search"
      start_tokens = tf.fill([self.batch_size], hparams.tgt_sos_id)
      end_token = hparams.tgt_eos_id
      beam_width = hparams.beam_width
      batch_size = self.batch_size * beam_width
      length_penalty_weight = hparams.length_penalty_weight
      coverage_penalty_weight = hparams.coverage_penalty_weight
      # maximum_iteration: The maximum decoding steps.
      maximum_iterations = hparams.tgt_max_len_infer

      def cell_fn(inputs, state):
        """Cell function used in decoder."""
        # Mirrors the training stack: attention cell first, then the three
        # uni layers with residual connections from layer 1 onwards.
        with tf.variable_scope(
            "multi_rnn_cell/cell_0_attention", reuse=self.reuse):
          o, s = atten_cell(inputs, state[0])
          o, attention = tf.split(o, 2, -1)
        new_state = [s]
        for i in range(3):
          with tf.variable_scope(
              "multi_rnn_cell/cell_%d" % (i + 1), reuse=self.reuse):
            inp = o
            o = tf.concat([o, attention], -1)
            o, s = cells[i](o, state[i + 1])
            new_state.append(s)
            if i > 0:
              o = inp + o
        return new_state, o

      # One initial state per beam for every layer.
      encoder_states = [
          tf.contrib.seq2seq.tile_batch(i, beam_width) for i in encoder_states
      ]
      state0 = [
          atten_cell.zero_state(
              batch_size, self.dtype).clone(cell_state=encoder_states[0])
      ]
      for i in range(1, 4):
        state0.append(encoder_states[i])
      my_decoder = beam_search_decoder.BeamSearchDecoder(
          cell=cell_fn,
          embedding=self.embedding_decoder,
          start_tokens=start_tokens,
          end_token=end_token,
          initial_state=state0,
          beam_width=beam_width,
          output_layer=self.output_layer,
          max_tgt=maximum_iterations,
          length_penalty_weight=length_penalty_weight,
          coverage_penalty_weight=coverage_penalty_weight,
          dtype=self.dtype)
      # Dynamic decoding
      predicted_ids = decoder.dynamic_decode(
          my_decoder,
          maximum_iterations=maximum_iterations,
          swap_memory=True,
          scope=decoder_scope)
      return None, predicted_ids
def _build_encoder(self, hparams, source):
  """Build a GNMT encoder.

  One bi-directional layer followed by three uni-directional layers, with
  residual connections applied from the second uni-directional layer on.

  Args:
    hparams: Hyperparameter configurations.
    source: Batch-major source token ids.

  Returns:
    A tuple (outputs, encoder_states): time-major encoder outputs and a
    list of per-layer final LSTM states.
  """
  source = tf.transpose(source)  # batch-major -> time-major
  with tf.variable_scope("encoder", reuse=self.reuse):
    emb = self._emb_lookup(self.embedding_encoder, source)
    seq_len = self.features["source_sequence_length"]
    if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
      # Input dropout is applied at train time only.
      emb = emb * dropout(emb.shape, emb.dtype, 1.0 - hparams.dropout)
    with tf.variable_scope("bi_fwd", reuse=self.reuse):
      fwd_cell = tf.contrib.rnn.BasicLSTMCell(
          hparams.num_units, reuse=self.reuse, forget_bias=1.0)
    with tf.variable_scope("bi_bwd", reuse=self.reuse):
      bwd_cell = tf.contrib.rnn.BasicLSTMCell(
          hparams.num_units, reuse=self.reuse, forget_bias=1.0)
    bi_outputs, bi_state = tf.contrib.recurrent.bidirectional_functional_rnn(
        fwd_cell,
        bwd_cell,
        emb,
        dtype=emb.dtype,
        sequence_length=seq_len,
        time_major=True,
        use_tpu=True)
    # Only the backward-direction final state (bi_state[1]) is kept as the
    # first entry of the state list.
    encoder_states = [bi_state[1]]
    out = tf.concat(bi_outputs, -1)
    for i in range(3):
      inp = out
      with tf.variable_scope(
          "rnn/multi_rnn_cell/cell_%d" % i, reuse=self.reuse) as scope:
        cell = tf.contrib.rnn.BasicLSTMCell(
            hparams.num_units, reuse=self.reuse, forget_bias=1.0)
        out, state = tf.contrib.recurrent.functional_rnn(
            cell,
            inp * dropout(inp.shape, emb.dtype, 1.0 - hparams.dropout)
            if self.mode == tf.contrib.learn.ModeKeys.TRAIN else inp,
            dtype=self.dtype,
            sequence_length=seq_len,
            time_major=True,
            scope=scope,
            use_tpu=True)
        encoder_states.append(state)
        if i > 0:
          # Residual connection from the second uni-directional layer on.
          out += inp
  return out, encoder_states
jazztpt/edx-platform | lms/djangoapps/certificates/migrations/0009_auto__del_field_generatedcertificate_graded_download_url__del_field_ge.py | 188 | 6118 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the certificates app.

    Drops the per-course 'graded_download_url' / 'graded_certificate_id'
    columns, adds the boolean 'distinction' flag, and enforces uniqueness
    of (user, course_id) on GeneratedCertificate.
    """

    def forwards(self, orm):
        """Apply the schema changes."""
        # Deleting field 'GeneratedCertificate.graded_download_url'
        db.delete_column('certificates_generatedcertificate', 'graded_download_url')

        # Deleting field 'GeneratedCertificate.graded_certificate_id'
        db.delete_column('certificates_generatedcertificate', 'graded_certificate_id')

        # Adding field 'GeneratedCertificate.distinction'
        db.add_column('certificates_generatedcertificate', 'distinction',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding unique constraint on 'GeneratedCertificate', fields ['course_id', 'user']
        db.create_unique('certificates_generatedcertificate', ['course_id', 'user_id'])

    def backwards(self, orm):
        """Revert the schema changes applied by forwards()."""
        # Removing unique constraint on 'GeneratedCertificate', fields ['course_id', 'user']
        db.delete_unique('certificates_generatedcertificate', ['course_id', 'user_id'])

        # Adding field 'GeneratedCertificate.graded_download_url'
        db.add_column('certificates_generatedcertificate', 'graded_download_url',
                      self.gf('django.db.models.fields.CharField')(default=False, max_length=128),
                      keep_default=False)

        # Adding field 'GeneratedCertificate.graded_certificate_id'
        db.add_column('certificates_generatedcertificate', 'graded_certificate_id',
                      self.gf('django.db.models.fields.CharField')(default=False, max_length=32),
                      keep_default=False)

        # Deleting field 'GeneratedCertificate.distinction'
        db.delete_column('certificates_generatedcertificate', 'distinction')

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'certificates.generatedcertificate': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
            'certificate_id': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '32'}),
            'course_id': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '255'}),
            'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'download_url': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '128'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grade': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '5'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['certificates']
| agpl-3.0 |
paul-breen/paul-breen.github.io | js/sos/proj4js/tools/jsmin.py | 95 | 7687 | #!/usr/bin/python
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. The original code had the following copyright and
# license.
#
# /* jsmin.c
# 2007-01-08
#
# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# The Software shall be used for Good, not Evil.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# */
from StringIO import StringIO
def jsmin(js):
    """Return the JavaScript source *js* minified as a string.

    Runs Crockford's jsmin algorithm over the input and strips the single
    leading newline the minifier usually emits.
    """
    ins = StringIO(js)
    outs = StringIO()
    JavascriptMinify().minify(ins, outs)
    # Renamed the local from `str` to avoid shadowing the builtin.
    result = outs.getvalue()
    if len(result) > 0 and result[0] == '\n':
        result = result[1:]
    return result
def isAlphanum(c):
    """Return true if the character is a letter, digit, underscore,
    dollar sign, backslash, or non-ASCII character.
    """
    return ('a' <= c <= 'z'
            or '0' <= c <= '9'
            or 'A' <= c <= 'Z'
            or c in ('_', '$', '\\')
            or (c is not None and ord(c) > 126))
class UnterminatedComment(Exception):
    """Raised when a /* ... */ comment reaches EOF without being closed."""
    pass
class UnterminatedStringLiteral(Exception):
    """Raised when a quoted string ends at a newline or EOF."""
    pass
class UnterminatedRegularExpression(Exception):
    """Raised when a regex literal ends at a newline or EOF."""
    pass
class JavascriptMinify(object):
    """Stream minifier implementing Douglas Crockford's jsmin algorithm.

    Keeps two characters of context (``theA``, ``theB``) plus one character
    of lookahead, copying significant characters from ``instream`` to
    ``outstream`` while removing comments and most whitespace.
    """

    def _outA(self):
        """Write the current character A to the output stream."""
        self.outstream.write(self.theA)

    def _outB(self):
        """Write the lookahead character B to the output stream."""
        self.outstream.write(self.theB)

    def _get(self):
        """return the next character from stdin. Watch out for lookahead. If
           the character is a control character, translate it to a space or
           linefeed.
        """
        c = self.theLookahead
        self.theLookahead = None
        if c is None:  # was `c == None`; identity test is the correct idiom
            c = self.instream.read(1)
        if c >= ' ' or c == '\n':
            return c
        if c == '':  # EOF
            return '\000'
        if c == '\r':
            return '\n'
        return ' '

    def _peek(self):
        """Look at the next character without consuming it."""
        self.theLookahead = self._get()
        return self.theLookahead

    def _next(self):
        """get the next character, excluding comments. peek() is used to see
           if a '/' is followed by a '/' or '*'.
        """
        c = self._get()
        if c == '/':
            p = self._peek()
            if p == '/':
                # Line comment: consume to end of line, return the newline.
                c = self._get()
                while c > '\n':
                    c = self._get()
                return c
            if p == '*':
                # Block comment: consume to '*/', which becomes one space.
                c = self._get()
                while 1:
                    c = self._get()
                    if c == '*':
                        if self._peek() == '/':
                            self._get()
                            return ' '
                    if c == '\000':
                        raise UnterminatedComment()
        return c

    def _action(self, action):
        """do something! What you do is determined by the argument:
           1   Output A. Copy B to A. Get the next B.
           2   Copy B to A. Get the next B. (Delete A).
           3   Get the next B. (Delete B).
           action treats a string as a single character. Wow!
           action recognizes a regular expression if it is preceded by ( or , or =.
        """
        if action <= 1:
            self._outA()

        if action <= 2:
            self.theA = self.theB
            if self.theA == "'" or self.theA == '"':
                # String literal: copy verbatim, honouring backslash escapes.
                while 1:
                    self._outA()
                    self.theA = self._get()
                    if self.theA == self.theB:
                        break
                    if self.theA <= '\n':
                        raise UnterminatedStringLiteral()
                    if self.theA == '\\':
                        self._outA()
                        self.theA = self._get()

        if action <= 3:
            self.theB = self._next()
            if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
                                     self.theA == '=' or self.theA == ':' or
                                     self.theA == '[' or self.theA == '?' or
                                     self.theA == '!' or self.theA == '&' or
                                     self.theA == '|'):
                # Regex literal (a '/' in expression position): copy verbatim.
                self._outA()
                self._outB()
                while 1:
                    self.theA = self._get()
                    if self.theA == '/':
                        break
                    elif self.theA == '\\':
                        self._outA()
                        self.theA = self._get()
                    elif self.theA <= '\n':
                        raise UnterminatedRegularExpression()
                    self._outA()
                self.theB = self._next()

    def _jsmin(self):
        """Copy the input to the output, deleting the characters which are
           insignificant to JavaScript. Comments will be removed. Tabs will be
           replaced with spaces. Carriage returns will be replaced with
           linefeeds. Most spaces and linefeeds will be removed.
        """
        self.theA = '\n'
        self._action(3)

        while self.theA != '\000':
            if self.theA == ' ':
                if isAlphanum(self.theB):
                    self._action(1)
                else:
                    self._action(2)
            elif self.theA == '\n':
                if self.theB in ['{', '[', '(', '+', '-']:
                    self._action(1)
                elif self.theB == ' ':
                    self._action(3)
                else:
                    if isAlphanum(self.theB):
                        self._action(1)
                    else:
                        self._action(2)
            else:
                if self.theB == ' ':
                    if isAlphanum(self.theA):
                        self._action(1)
                    else:
                        self._action(3)
                elif self.theB == '\n':
                    if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
                        self._action(1)
                    else:
                        if isAlphanum(self.theA):
                            self._action(1)
                        else:
                            self._action(3)
                else:
                    self._action(1)

    def minify(self, instream, outstream):
        """Minify *instream* into *outstream*; closes the input stream."""
        self.instream = instream
        self.outstream = outstream
        self.theA = None
        # BUG FIX: this was `self.thaB` (a typo), leaving `theB` unset here.
        # Harmless in practice because _jsmin() assigns theB before any read,
        # but corrected so the attribute is actually initialised.
        self.theB = None
        self.theLookahead = None

        self._jsmin()
        self.instream.close()
if __name__ == '__main__':
    # Command-line usage: filter JavaScript from stdin to stdout.
    import sys
    jsm = JavascriptMinify()
    jsm.minify(sys.stdin, sys.stdout)
| apache-2.0 |
rosmo/ansible | lib/ansible/modules/network/a10/a10_virtual_server.py | 34 | 11180 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Mischa Peters <mpeters@a10networks.com>,
# Eric Chou <ericc@a10networks.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard module metadata consumed by Ansible tooling (ansible-doc, CI).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author:
- Eric Chou (@ericchou1)
- Mischa Peters (@mischapeters)
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment:
- a10
- url
options:
state:
description:
- If the specified virtual server should exist.
choices: ['present', 'absent']
default: present
partition:
version_added: "2.3"
description:
- set active-partition
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
default: enable
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: str
sample: "mynewvirtualserver"
'''
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
# Keys accepted in each entry of the `virtual_server_ports` option.
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
    """Validate and normalise the port definition list in place.

    Any malformed entry aborts the module run via ``module.fail_json``
    (which does not return). On success every entry has an integer
    ``port``, an internal-integer ``protocol``, an integer ``status``
    (default 1 = enabled) and a ``service_group`` key (default '').
    """
    for port_def in ports:
        # Reject unknown keys up front.
        for field in port_def:
            if field not in VALID_PORT_FIELDS:
                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (field, ','.join(VALID_PORT_FIELDS)))

        # The port number is mandatory and must coerce to an integer.
        if 'port' not in port_def:
            module.fail_json(msg="port definitions must define the port field")
        try:
            port_def['port'] = int(port_def['port'])
        except Exception:
            module.fail_json(msg="port definitions must be integers")

        # Map the protocol name onto the internal aXAPI integer value.
        if 'protocol' not in port_def:
            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
        proto = axapi_get_vport_protocol(port_def['protocol'])
        if not proto:
            module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
        port_def['protocol'] = proto

        # Status defaults to enabled (1) when omitted.
        if 'status' in port_def:
            port_def['status'] = axapi_enabled_disabled(port_def['status'])
        else:
            port_def['status'] = 1

        # Guarantee the service_group key is always present.
        port_def.setdefault('service_group', '')
def main():
    """Module entry point: create, update or delete an SLB virtual server."""
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
            virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
            virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
            virtual_server_ports=dict(type='list', required=True),
            # NOTE(review): default=[] for a str-typed option looks unintended
            # (mutable default, wrong type); presumably '' was meant — confirm
            # with callers before changing.
            partition=dict(type='str', default=[]),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    partition = module.params['partition']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_virtual = module.params['virtual_server']
    slb_virtual_ip = module.params['virtual_server_ip']
    slb_virtual_status = module.params['virtual_server_status']
    slb_virtual_ports = module.params['virtual_server_ports']

    if slb_virtual is None:
        module.fail_json(msg='virtual_server is required')

    # Normalise/validate the port list before talking to the device.
    validate_ports(module, slb_virtual_ports)

    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, axapi_base_url, username, password)

    # Activate the requested partition, then probe for an existing VIP.
    axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
    slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
    slb_virtual_exists = not axapi_failure(slb_virtual_data)

    changed = False
    if state == 'present':
        json_post = {
            'virtual_server': {
                'name': slb_virtual,
                'address': slb_virtual_ip,
                'status': axapi_enabled_disabled(slb_virtual_status),
                'vport_list': slb_virtual_ports,
            }
        }

        # before creating/updating we need to validate that any
        # service groups defined in the ports list exist since
        # since the API will still create port definitions for
        # them while indicating a failure occurred
        checked_service_groups = []
        for port in slb_virtual_ports:
            if 'service_group' in port and port['service_group'] not in checked_service_groups:
                # skip blank service group entries
                if port['service_group'] == '':
                    continue
                result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
                if axapi_failure(result):
                    module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
                checked_service_groups.append(port['service_group'])

        if not slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
            changed = True
        else:
            def needs_update(src_ports, dst_ports):
                '''
                Checks to determine if the port definitions of the src_ports
                array are in or different from those in dst_ports. If there is
                a difference, this function returns true, otherwise false.
                '''
                for src_port in src_ports:
                    found = False
                    different = False
                    for dst_port in dst_ports:
                        if src_port['port'] == dst_port['port']:
                            found = True
                            for valid_field in VALID_PORT_FIELDS:
                                if src_port[valid_field] != dst_port[valid_field]:
                                    different = True
                                    break
                        if found or different:
                            break
                    if not found or different:
                        return True
                # every port from the src exists in the dst, and none of them were different
                return False

            defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])

            # we check for a needed update both ways, in case ports
            # are missing from either the ones specified by the user
            # or from those on the device
            if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
                result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
                changed = True

        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
        else:
            result = slb_virtual_data
    elif state == 'absent':
        if slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
            changed = True
        else:
            result = dict(msg="the virtual server was not present")

    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])

    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
# Entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Zac-HD/home-assistant | homeassistant/helpers/state.py | 8 | 7784 | """Helpers that help with state related things."""
import asyncio
import json
import logging
from collections import defaultdict
import homeassistant.util.dt as dt_util
from homeassistant.components.media_player import (
ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_VOLUME_LEVEL, ATTR_MEDIA_VOLUME_MUTED, SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE, ATTR_INPUT_SOURCE)
from homeassistant.components.notify import (
ATTR_MESSAGE, SERVICE_NOTIFY)
from homeassistant.components.sun import (
STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON)
from homeassistant.components.switch.mysensors import (
ATTR_IR_CODE, SERVICE_SEND_IR_CODE)
from homeassistant.components.climate import (
ATTR_AUX_HEAT, ATTR_AWAY_MODE, ATTR_FAN_MODE, ATTR_HOLD_MODE,
ATTR_HUMIDITY, ATTR_OPERATION_MODE, ATTR_SWING_MODE,
SERVICE_SET_AUX_HEAT, SERVICE_SET_AWAY_MODE, SERVICE_SET_HOLD_MODE,
SERVICE_SET_FAN_MODE, SERVICE_SET_HUMIDITY, SERVICE_SET_OPERATION_MODE,
SERVICE_SET_SWING_MODE, SERVICE_SET_TEMPERATURE)
from homeassistant.components.climate.ecobee import (
ATTR_FAN_MIN_ON_TIME, SERVICE_SET_FAN_MIN_ON_TIME,
ATTR_RESUME_ALL, SERVICE_RESUME_PROGRAM)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER,
SERVICE_LOCK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_SEEK, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_UNLOCK,
SERVICE_VOLUME_MUTE, SERVICE_VOLUME_SET, SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED, STATE_CLOSED, STATE_LOCKED,
STATE_OFF, STATE_ON, STATE_OPEN, STATE_PAUSED, STATE_PLAYING,
STATE_UNKNOWN, STATE_UNLOCKED, SERVICE_SELECT_OPTION, ATTR_OPTION)
from homeassistant.core import State
from homeassistant.util.async import run_coroutine_threadsafe
_LOGGER = logging.getLogger(__name__)
# Group entity states are reproduced through the 'homeassistant' domain's
# services (see async_reproduce_state).
GROUP_DOMAIN = 'group'

HASS_DOMAIN = 'homeassistant'

# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
    SERVICE_PLAY_MEDIA: [ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_CONTENT_ID],
    SERVICE_MEDIA_SEEK: [ATTR_MEDIA_SEEK_POSITION],
    SERVICE_VOLUME_MUTE: [ATTR_MEDIA_VOLUME_MUTED],
    SERVICE_VOLUME_SET: [ATTR_MEDIA_VOLUME_LEVEL],
    SERVICE_NOTIFY: [ATTR_MESSAGE],
    SERVICE_SET_AWAY_MODE: [ATTR_AWAY_MODE],
    SERVICE_SET_FAN_MODE: [ATTR_FAN_MODE],
    SERVICE_SET_FAN_MIN_ON_TIME: [ATTR_FAN_MIN_ON_TIME],
    SERVICE_RESUME_PROGRAM: [ATTR_RESUME_ALL],
    SERVICE_SET_TEMPERATURE: [ATTR_TEMPERATURE],
    SERVICE_SET_HUMIDITY: [ATTR_HUMIDITY],
    SERVICE_SET_SWING_MODE: [ATTR_SWING_MODE],
    SERVICE_SET_HOLD_MODE: [ATTR_HOLD_MODE],
    SERVICE_SET_OPERATION_MODE: [ATTR_OPERATION_MODE],
    SERVICE_SET_AUX_HEAT: [ATTR_AUX_HEAT],
    SERVICE_SELECT_SOURCE: [ATTR_INPUT_SOURCE],
    SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
    SERVICE_SELECT_OPTION: [ATTR_OPTION]
}

# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
    SERVICE_TURN_ON: STATE_ON,
    SERVICE_TURN_OFF: STATE_OFF,
    SERVICE_MEDIA_PLAY: STATE_PLAYING,
    SERVICE_MEDIA_PAUSE: STATE_PAUSED,
    SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
    SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
    SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
    SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
    SERVICE_LOCK: STATE_LOCKED,
    SERVICE_UNLOCK: STATE_UNLOCKED,
    SERVICE_OPEN_COVER: STATE_OPEN,
    SERVICE_CLOSE_COVER: STATE_CLOSED
}
class AsyncTrackStates(object):
    """Context manager that collects every state changed inside the block.

    On entry the current UTC time is recorded and an (initially empty)
    list is handed back; on exit every state updated since entry is
    appended to that same list.

    Must be run within the event loop.
    """

    def __init__(self, hass):
        """Remember the Home Assistant instance and prepare the result list."""
        self.hass = hass
        self.states = []

    def __enter__(self):
        """Mark the start of tracking and expose the result list."""
        # pylint: disable=attribute-defined-outside-init
        self.now = dt_util.utcnow()
        return self.states

    def __exit__(self, exc_type, exc_value, traceback):
        """Append all states touched since __enter__ to the result list."""
        changed = get_changed_since(self.hass.states.async_all(), self.now)
        self.states.extend(changed)
def get_changed_since(states, utc_point_in_time):
    """Return the states whose last update is at or after the given time."""
    def _updated_since(state):
        return state.last_updated >= utc_point_in_time

    return list(filter(_updated_since, states))
def reproduce_state(hass, states, blocking=False):
    """Reproduce given state (synchronous wrapper around the coroutine)."""
    coro = async_reproduce_state(hass, states, blocking)
    future = run_coroutine_threadsafe(coro, hass.loop)
    return future.result()
@asyncio.coroutine
def async_reproduce_state(hass, states, blocking=False):
    """Reproduce given state.

    Maps each state to the best-matching service (by required attributes
    and/or target state), groups entities with identical service data into
    one call, and fires the calls per domain in sequence.
    """
    if isinstance(states, State):
        states = [states]

    to_call = defaultdict(list)

    for state in states:

        if hass.states.get(state.entity_id) is None:
            _LOGGER.warning('reproduce_state: Unable to find entity %s',
                            state.entity_id)
            continue

        # Group states are reproduced via the homeassistant domain services.
        if state.domain == GROUP_DOMAIN:
            service_domain = HASS_DOMAIN
        else:
            service_domain = state.domain

        domain_services = hass.services.async_services()[service_domain]

        # Pick a service whose required attributes are all present or whose
        # target state matches; a state match is preferred (breaks the loop).
        service = None
        for _service in domain_services.keys():
            if (_service in SERVICE_ATTRIBUTES and
                    all(attr in state.attributes
                        for attr in SERVICE_ATTRIBUTES[_service]) or
                    _service in SERVICE_TO_STATE and
                    SERVICE_TO_STATE[_service] == state.state):
                service = _service
            if (_service in SERVICE_TO_STATE and
                    SERVICE_TO_STATE[_service] == state.state):
                break

        if not service:
            _LOGGER.warning("reproduce_state: Unable to reproduce state %s",
                            state)
            continue

        # We group service calls for entities by service call
        # json used to create a hashable version of dict with maybe lists in it
        key = (service_domain, service,
               json.dumps(dict(state.attributes), sort_keys=True))
        to_call[key].append(state.entity_id)

    domain_tasks = {}
    for (service_domain, service, service_data), entity_ids in to_call.items():
        data = json.loads(service_data)
        data[ATTR_ENTITY_ID] = entity_ids

        if service_domain not in domain_tasks:
            domain_tasks[service_domain] = []

        domain_tasks[service_domain].append(
            hass.services.async_call(service_domain, service, data, blocking)
        )

    @asyncio.coroutine
    def async_handle_service_calls(coro_list):
        """Handle service calls by domain sequence."""
        for coro in coro_list:
            yield from coro

    # Calls within a domain run sequentially; domains run concurrently.
    execute_tasks = [async_handle_service_calls(coro_list)
                     for coro_list in domain_tasks.values()]
    if execute_tasks:
        yield from asyncio.wait(execute_tasks, loop=hass.loop)
def state_as_number(state):
    """Try to coerce our state to a number.

    Raises ValueError if this is not possible.
    """
    value = state.state
    if value in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON, STATE_OPEN):
        return 1
    if value in (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
                 STATE_BELOW_HORIZON, STATE_CLOSED):
        return 0
    # Anything else must parse as a float, otherwise ValueError escapes.
    return float(value)
| apache-2.0 |
amitskwalia/codesters | Imaging-1.1.7/setup.py | 4 | 17323 | #!/usr/bin/env python
#
# Setup script for PIL 1.1.5 and later
#
# Usage: python setup.py install
#
import glob, os, re, struct, string, sys
# make it possible to run the setup script from another directory
try:
    # dirname(sys.argv[0]) is '' when invoked from the source directory;
    # chdir('') raises OSError, which we deliberately ignore.
    os.chdir(os.path.dirname(sys.argv[0]))
except OSError:
    pass
def libinclude(root):
    """Map a root directory to its (root/lib, root/include) pair."""
    lib_dir = os.path.join(root, "lib")
    include_dir = os.path.join(root, "include")
    return lib_dir, include_dir
# --------------------------------------------------------------------
# Library pointers.
#
# Use None to look for the libraries in well-known library locations.
# Use a string to specify a single directory, for both the library and
# the include files. Use a tuple to specify separate directories:
# (libpath, includepath). Examples:
#
# JPEG_ROOT = "/home/libraries/jpeg-6b"
# TIFF_ROOT = "/opt/tiff/lib", "/opt/tiff/include"
#
# If you have "lib" and "include" directories under a common parent,
# you can use the "libinclude" helper:
#
# TIFF_ROOT = libinclude("/opt/tiff")
TCL_ROOT = None
JPEG_ROOT = None
ZLIB_ROOT = None
TIFF_ROOT = None
FREETYPE_ROOT = None
LCMS_ROOT = None
# FIXME: add mechanism to explicitly *disable* the use of a library
# --------------------------------------------------------------------
# Identification
NAME = "PIL"
DESCRIPTION = "Python Imaging Library"
AUTHOR = "Secret Labs AB (PythonWare)", "info@pythonware.com"
HOMEPAGE = "http://www.pythonware.com/products/pil"
DOWNLOAD_URL = "http://effbot.org/downloads/%s-%s.tar.gz" # name, version
# --------------------------------------------------------------------
# Core library
IMAGING = [
"decode", "encode", "map", "display", "outline", "path",
]
LIBIMAGING = [
"Access", "Antialias", "Bands", "BitDecode", "Blend", "Chops",
"Convert", "ConvertYCbCr", "Copy", "Crc32", "Crop", "Dib", "Draw",
"Effects", "EpsEncode", "File", "Fill", "Filter", "FliDecode",
"Geometry", "GetBBox", "GifDecode", "GifEncode", "HexDecode",
"Histo", "JpegDecode", "JpegEncode", "LzwDecode", "Matrix",
"ModeFilter", "MspDecode", "Negative", "Offset", "Pack",
"PackDecode", "Palette", "Paste", "Quant", "QuantHash",
"QuantHeap", "PcdDecode", "PcxDecode", "PcxEncode", "Point",
"RankFilter", "RawDecode", "RawEncode", "Storage", "SunRleDecode",
"TgaRleDecode", "Unpack", "UnpackYCC", "UnsharpMask", "XbmDecode",
"XbmEncode", "ZipDecode", "ZipEncode"
]
# --------------------------------------------------------------------
# Override settings
try:
from setup_site import *
except ImportError:
pass
# --------------------------------------------------------------------
from distutils import sysconfig
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
try:
import _tkinter
except ImportError:
_tkinter = None
def add_directory(path, dir, where=None):
    """Add *dir* to the list *path* if it is an existing directory that
    is not already listed.  Appends by default; inserts at index *where*
    when given."""
    if not dir or not os.path.isdir(dir) or dir in path:
        return
    if where is None:
        path.append(dir)
    else:
        path.insert(where, dir)
def find_include_file(self, include):
    """Return 1 when *include* exists in one of the compiler's include
    directories, else 0."""
    matches = [directory for directory in self.compiler.include_dirs
               if os.path.isfile(os.path.join(directory, include))]
    if matches:
        return 1
    return 0
def find_library_file(self, library):
    # Delegate the lookup to distutils' compiler object, searching the
    # library directories assembled in build_extensions(); returns the
    # path of the library or None.
    return self.compiler.find_library_file(self.compiler.library_dirs, library)
def find_version(filename):
    """Return the version string from the first VERSION = "..." line
    found in *filename*, or None if no such line exists.

    Fixes over the previous revision: the regex is a raw string (the
    old non-raw "\\s" escapes trigger SyntaxWarning on modern Python),
    it is compiled once instead of per line, the file is iterated
    lazily instead of via readlines(), and the handle is closed
    deterministically.
    """
    pattern = re.compile(r"VERSION\s*=\s*\"([^\"]+)\"")
    f = open(filename)
    try:
        for line in f:
            m = pattern.search(line)
            if m:
                return m.group(1)
    finally:
        f.close()
    return None
VERSION = find_version("PIL/Image.py")
class pil_build_ext(build_ext):
    """distutils build_ext subclass that probes the build machine for
    optional libraries (zlib, jpeg, tiff, freetype, lcms, Tcl/Tk) and
    rewrites self.extensions accordingly before building.

    NOTE(review): this is Python 2 code (print statements, the `string`
    module); it will not run unmodified on Python 3.
    """

    def build_extensions(self):
        """Probe the system, assemble the Extension list, build, then
        print a summary report."""

        global TCL_ROOT

        library_dirs = []
        include_dirs = []

        add_directory(include_dirs, "libImaging")

        #
        # add platform directories

        if sys.platform == "cygwin":
            # pythonX.Y.dll.a is in the /usr/lib/pythonX.Y/config directory
            add_directory(library_dirs, os.path.join(
                "/usr/lib", "python%s" % sys.version[:3], "config"
                ))

        elif sys.platform == "darwin":
            # attempt to make sure we pick freetype2 over other versions
            add_directory(include_dirs, "/sw/include/freetype2")
            add_directory(include_dirs, "/sw/lib/freetype2/include")
            # fink installation directories
            add_directory(library_dirs, "/sw/lib")
            add_directory(include_dirs, "/sw/include")
            # darwin ports installation directories
            add_directory(library_dirs, "/opt/local/lib")
            add_directory(include_dirs, "/opt/local/include")
            add_directory(library_dirs, "/usr/local/lib")
            # FIXME: check /opt/stuff directories here?

        prefix = sysconfig.get_config_var("prefix")
        if prefix:
            add_directory(library_dirs, os.path.join(prefix, "lib"))
            add_directory(include_dirs, os.path.join(prefix, "include"))

        #
        # locate tkinter libraries

        if _tkinter:
            TCL_VERSION = _tkinter.TCL_VERSION[:3]

        if _tkinter and not TCL_ROOT:
            # we have Tkinter but the TCL_ROOT variable was not set;
            # try to locate appropriate Tcl/Tk libraries
            PYVERSION = sys.version[0] + sys.version[2]
            TCLVERSION = TCL_VERSION[0] + TCL_VERSION[2]
            roots = [
                # common installation directories, mostly for Windows
                # (for Unix-style platforms, we'll check in well-known
                # locations later)
                os.path.join("/py" + PYVERSION, "Tcl"),
                os.path.join("/python" + PYVERSION, "Tcl"),
                "/Tcl", "/Tcl" + TCLVERSION, "/Tcl" + TCL_VERSION,
                os.path.join(os.environ.get("ProgramFiles", ""), "Tcl"),
                ]
            for TCL_ROOT in roots:
                TCL_ROOT = os.path.abspath(TCL_ROOT)
                if os.path.isfile(os.path.join(TCL_ROOT, "include", "tk.h")):
                    # FIXME: use distutils logging (?)
                    print "--- using Tcl/Tk libraries at", TCL_ROOT
                    print "--- using Tcl/Tk version", TCL_VERSION
                    TCL_ROOT = libinclude(TCL_ROOT)
                    break
            else:
                TCL_ROOT = None

        #
        # add configured kits

        # NOTE(review): TCL_ROOT appears twice in this tuple (harmless,
        # since add_directory ignores duplicates).
        for root in (TCL_ROOT, JPEG_ROOT, TCL_ROOT, TIFF_ROOT, ZLIB_ROOT,
                     FREETYPE_ROOT, LCMS_ROOT):
            if isinstance(root, type(())):
                lib_root, include_root = root
            else:
                lib_root = include_root = root
            add_directory(library_dirs, lib_root)
            add_directory(include_dirs, include_root)

        #
        # add standard directories

        # look for tcl specific subdirectory (e.g debian)
        if _tkinter:
            tcl_dir = "/usr/include/tcl" + TCL_VERSION
            if os.path.isfile(os.path.join(tcl_dir, "tk.h")):
                add_directory(include_dirs, tcl_dir)

        # standard locations
        add_directory(library_dirs, "/usr/local/lib")
        add_directory(include_dirs, "/usr/local/include")

        add_directory(library_dirs, "/usr/lib")
        add_directory(include_dirs, "/usr/include")

        #
        # insert new dirs *before* default libs, to avoid conflicts
        # between Python PYD stub libs and real libraries

        self.compiler.library_dirs = library_dirs + self.compiler.library_dirs
        self.compiler.include_dirs = include_dirs + self.compiler.include_dirs

        #
        # look for available libraries

        # Simple record of which optional libraries were detected.
        class feature:
            zlib = jpeg = tiff = freetype = tcl = tk = lcms = None
        feature = feature()

        if find_include_file(self, "zlib.h"):
            if find_library_file(self, "z"):
                feature.zlib = "z"
            elif sys.platform == "win32" and find_library_file(self, "zlib"):
                feature.zlib = "zlib" # alternative name

        if find_include_file(self, "jpeglib.h"):
            if find_library_file(self, "jpeg"):
                feature.jpeg = "jpeg"
            elif sys.platform == "win32" and find_library_file(self, "libjpeg"):
                feature.jpeg = "libjpeg" # alternative name

        if find_library_file(self, "tiff"):
            feature.tiff = "tiff"

        if find_library_file(self, "freetype"):
            # look for freetype2 include files
            freetype_version = 0
            for dir in self.compiler.include_dirs:
                # ft2build.h directly in the dir or in a freetype2/
                # subdirectory means freetype 2.1+; a freetype/ subdir
                # alone means freetype 2.0.
                if os.path.isfile(os.path.join(dir, "ft2build.h")):
                    freetype_version = 21
                    dir = os.path.join(dir, "freetype2")
                    break
                dir = os.path.join(dir, "freetype2")
                if os.path.isfile(os.path.join(dir, "ft2build.h")):
                    freetype_version = 21
                    break
                if os.path.isdir(os.path.join(dir, "freetype")):
                    freetype_version = 20
                    break
            if freetype_version:
                feature.freetype = "freetype"
                feature.freetype_version = freetype_version
                if dir:
                    add_directory(self.compiler.include_dirs, dir, 0)

        if find_include_file(self, "lcms.h"):
            if find_library_file(self, "lcms"):
                feature.lcms = "lcms"

        if _tkinter and find_include_file(self, "tk.h"):
            # the library names may vary somewhat (e.g. tcl84 or tcl8.4)
            version = TCL_VERSION[0] + TCL_VERSION[2]
            if find_library_file(self, "tcl" + version):
                feature.tcl = "tcl" + version
            elif find_library_file(self, "tcl" + TCL_VERSION):
                feature.tcl = "tcl" + TCL_VERSION
            if find_library_file(self, "tk" + version):
                feature.tk = "tk" + version
            elif find_library_file(self, "tk" + TCL_VERSION):
                feature.tk = "tk" + TCL_VERSION

        #
        # core library

        files = ["_imaging.c"]
        for file in IMAGING:
            files.append(file + ".c")
        for file in LIBIMAGING:
            files.append(os.path.join("libImaging", file + ".c"))

        libs = []
        defs = []
        if feature.jpeg:
            libs.append(feature.jpeg)
            defs.append(("HAVE_LIBJPEG", None))
        if feature.zlib:
            libs.append(feature.zlib)
            defs.append(("HAVE_LIBZ", None))
        if sys.platform == "win32":
            libs.extend(["kernel32", "user32", "gdi32"])
        # Runtime endianness probe (Python 2 str-as-bytes unpack).
        if struct.unpack("h", "\0\1")[0] == 1:
            defs.append(("WORDS_BIGENDIAN", None))

        exts = [(Extension(
            "_imaging", files, libraries=libs, define_macros=defs
            ))]

        #
        # additional libraries

        if feature.freetype:
            defs = []
            if feature.freetype_version == 20:
                defs.append(("USE_FREETYPE_2_0", None))
            exts.append(Extension(
                "_imagingft", ["_imagingft.c"], libraries=["freetype"],
                define_macros=defs
                ))

        if os.path.isfile("_imagingtiff.c") and feature.tiff:
            exts.append(Extension(
                "_imagingtiff", ["_imagingtiff.c"], libraries=["tiff"]
                ))

        if os.path.isfile("_imagingcms.c") and feature.lcms:
            extra = []
            if sys.platform == "win32":
                extra.extend(["user32", "gdi32"])
            exts.append(Extension(
                "_imagingcms", ["_imagingcms.c"], libraries=["lcms"] + extra
                ))

        if sys.platform == "darwin":
            # locate Tcl/Tk frameworks
            frameworks = []
            framework_roots = [
                "/Library/Frameworks",
                "/System/Library/Frameworks"
                ]
            for root in framework_roots:
                if (os.path.exists(os.path.join(root, "Tcl.framework")) and
                    os.path.exists(os.path.join(root, "Tk.framework"))):
                    print "--- using frameworks at", root
                    frameworks = ["-framework", "Tcl", "-framework", "Tk"]
                    dir = os.path.join(root, "Tcl.framework", "Headers")
                    add_directory(self.compiler.include_dirs, dir, 0)
                    dir = os.path.join(root, "Tk.framework", "Headers")
                    add_directory(self.compiler.include_dirs, dir, 1)
                    break
            if frameworks:
                exts.append(Extension(
                    "_imagingtk", ["_imagingtk.c", "Tk/tkImaging.c"],
                    extra_compile_args=frameworks, extra_link_args=frameworks
                    ))
                feature.tcl = feature.tk = 1 # mark as present
        elif feature.tcl and feature.tk:
            exts.append(Extension(
                "_imagingtk", ["_imagingtk.c", "Tk/tkImaging.c"],
                libraries=[feature.tcl, feature.tk]
                ))

        if os.path.isfile("_imagingmath.c"):
            exts.append(Extension("_imagingmath", ["_imagingmath.c"]))

        # Replace the dummy extension list given to setup() in place.
        self.extensions[:] = exts

        build_ext.build_extensions(self)

        #
        # sanity and security checks

        unsafe_zlib = None

        if feature.zlib:
            unsafe_zlib = self.check_zlib_version(self.compiler.include_dirs)

        self.summary_report(feature, unsafe_zlib)

    def summary_report(self, feature, unsafe_zlib):
        """Print a human-readable summary of which optional features
        were detected, plus a warning for known-vulnerable zlib."""

        print "-" * 68
        print "PIL", VERSION, "SETUP SUMMARY"
        print "-" * 68
        print "version      ", VERSION
        v = string.split(sys.version, "[")
        print "platform     ", sys.platform, string.strip(v[0])
        for v in v[1:]:
            print "             ", string.strip("[" + v)
        print "-" * 68

        options = [
            (feature.tcl and feature.tk, "TKINTER"),
            (feature.jpeg, "JPEG"),
            (feature.zlib, "ZLIB (PNG/ZIP)"),
            # (feature.tiff, "experimental TIFF G3/G4 read"),
            (feature.freetype, "FREETYPE2"),
            (feature.lcms, "LITTLECMS"),
            ]

        all = 1
        for option in options:
            if option[0]:
                print "---", option[1], "support available"
            else:
                print "***", option[1], "support not available",
                if option[1] == "TKINTER" and _tkinter:
                    version = _tkinter.TCL_VERSION
                    print "(Tcl/Tk %s libraries needed)" % version,
                print
                all = 0

        if feature.zlib and unsafe_zlib:
            print
            print "*** Warning: zlib", unsafe_zlib,
            print "may contain a security vulnerability."
            print "*** Consider upgrading to zlib 1.2.3 or newer."
            print "*** See: http://www.kb.cert.org/vuls/id/238678"
            print "         http://www.kb.cert.org/vuls/id/680620"
            print "         http://www.gzip.org/zlib/advisory-2002-03-11.txt"
            print

        print "-" * 68

        if not all:
            print "To add a missing option, make sure you have the required"
            print "library, and set the corresponding ROOT variable in the"
            print "setup.py script."
            print

        print "To check the build, run the selftest.py script."

    def check_zlib_version(self, include_dirs):
        # look for unsafe versions of zlib
        # Returns the version string if the first zlib.h found declares
        # a ZLIB_VERSION older than 1.2.3, otherwise None.
        for dir in include_dirs:
            zlibfile = os.path.join(dir, "zlib.h")
            if os.path.isfile(zlibfile):
                break
        else:
            return
        for line in open(zlibfile).readlines():
            m = re.match('#define\s+ZLIB_VERSION\s+"([^"]*)"', line)
            if not m:
                continue
            # NOTE(review): lexicographic string compare — fine for the
            # 1.x.y versions this predates, not a general version compare.
            if m.group(1) < "1.2.3":
                return m.group(1)
#
# build!

if __name__ == "__main__":

    try:
        # add necessary to distutils (for backwards compatibility)
        # Older distutils lacks these metadata attributes; stub them so
        # the setup() keywords below are accepted.
        from distutils.dist import DistributionMetadata
        DistributionMetadata.classifiers = None
        DistributionMetadata.download_url = None
        DistributionMetadata.platforms = None
    except:
        pass

    setup(
        author=AUTHOR[0], author_email=AUTHOR[1],
        classifiers=[
            "Development Status :: 6 - Mature",
            "Topic :: Multimedia :: Graphics",
            "Topic :: Multimedia :: Graphics :: Capture :: Digital Camera",
            "Topic :: Multimedia :: Graphics :: Capture :: Scanners",
            "Topic :: Multimedia :: Graphics :: Capture :: Screen Capture",
            "Topic :: Multimedia :: Graphics :: Graphics Conversion",
            "Topic :: Multimedia :: Graphics :: Viewers",
            ],
        cmdclass = {"build_ext": pil_build_ext},
        description=DESCRIPTION,
        download_url=DOWNLOAD_URL % (NAME, VERSION),
        # Placeholder; pil_build_ext.build_extensions replaces this
        # list with the ones actually detected on the build machine.
        ext_modules = [Extension("_imaging", ["_imaging.c"])], # dummy
        extra_path = "PIL",
        license="Python (MIT style)",
        long_description=DESCRIPTION,
        name=NAME,
        package_dir={"": "PIL"},
        packages=[""],
        platforms="Python 1.5.2 and later.",
        scripts = glob.glob("Scripts/pil*.py"),
        url=HOMEPAGE,
        version=VERSION,
        )
| mit |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/piezo_buzzer/piezo_buzzer.py | 1 | 4408 | # -*- coding: utf-8 -*-
"""
Piezo Buzzer Plugin
Copyright (C) 2011-2012 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2014, 2016 Matthias Bolte <matthias@tinkerforge.com>
piezo_buzzer.py: Piezo Buzzer Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtCore import pyqtSignal, QRegularExpression
from PyQt5.QtGui import QRegularExpressionValidator
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QLineEdit
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings import ip_connection
from brickv.bindings.bricklet_piezo_buzzer import BrickletPiezoBuzzer
class PiezoBuzzer(PluginBase):
    """Brick Viewer plugin for the Piezo Buzzer Bricklet: lets the user
    send a fixed-duration beep or a morse-code sequence and shows a
    simple busy/idle status line."""

    # Device callbacks arrive on the IP-connection thread; these Qt
    # signals re-dispatch them onto the GUI thread.
    qtcb_beep_finished = pyqtSignal()
    qtcb_morse_finished = pyqtSignal()

    def __init__(self, *args):
        super().__init__(BrickletPiezoBuzzer, *args)

        self.pb = self.device

        self.qtcb_beep_finished.connect(self.cb_beep)
        self.pb.register_callback(self.pb.CALLBACK_BEEP_FINISHED,
                                  self.qtcb_beep_finished.emit)
        self.qtcb_morse_finished.connect(self.cb_morse)
        self.pb.register_callback(self.pb.CALLBACK_MORSE_CODE_FINISHED,
                                  self.qtcb_morse_finished.emit)

        # Beep row: duration entry plus trigger button.
        self.beep_edit = QLineEdit()
        self.beep_edit.setText(str(1000))
        self.beep_label = QLabel('Duration [ms]:')
        self.beep_button = QPushButton('Send Beep')
        self.beep_layout = QHBoxLayout()
        self.beep_layout.addWidget(self.beep_label)
        self.beep_layout.addWidget(self.beep_edit)
        self.beep_layout.addWidget(self.beep_button)

        # Morse row: input restricted to dots, dashes and whitespace.
        self.morse_edit = QLineEdit()
        self.morse_edit.setText('- .. -. -.- . .-. ..-. --- .-. --. .')
        self.morse_edit.setMaxLength(60)
        self.morse_edit.setValidator(QRegularExpressionValidator(QRegularExpression("[\\s|\\-|\\.]*")))
        self.morse_label = QLabel('Morse Code:')
        self.morse_button = QPushButton('Send Morse Code')
        self.morse_layout = QHBoxLayout()
        self.morse_layout.addWidget(self.morse_label)
        self.morse_layout.addWidget(self.morse_edit)
        self.morse_layout.addWidget(self.morse_button)

        self.status_label = QLabel('Status: Idle')

        self.beep_button.clicked.connect(self.beep_clicked)
        self.morse_button.clicked.connect(self.morse_clicked)

        layout = QVBoxLayout(self)
        layout.addLayout(self.beep_layout)
        layout.addLayout(self.morse_layout)
        layout.addWidget(self.status_label)
        layout.addStretch()

    def start(self):
        pass

    def stop(self):
        pass

    def destroy(self):
        pass

    @staticmethod
    def has_device_identifier(device_identifier):
        # Used by brickv to match this plugin to a discovered bricklet.
        return device_identifier == BrickletPiezoBuzzer.DEVICE_IDENTIFIER

    def cb_beep(self):
        # Beep finished: re-enable both buttons and go back to idle.
        self.beep_button.setDisabled(False)
        self.morse_button.setDisabled(False)
        self.status_label.setText('Status: Idle')

    def cb_morse(self):
        # Morse sequence finished: re-enable both buttons, back to idle.
        self.beep_button.setDisabled(False)
        self.morse_button.setDisabled(False)
        self.status_label.setText('Status: Idle')

    def beep_clicked(self):
        duration = int(self.beep_edit.text())
        try:
            self.pb.beep(duration)
        except ip_connection.Error:
            # Communication problem: leave the UI untouched.
            return

        # Disable both buttons until the finished callback fires.
        self.beep_button.setDisabled(True)
        self.morse_button.setDisabled(True)
        self.status_label.setText('Status: Beeping...')

    def morse_clicked(self):
        morse = self.morse_edit.text()
        try:
            self.pb.morse_code(morse)
        except ip_connection.Error:
            # Communication problem: leave the UI untouched.
            return

        # Disable both buttons until the finished callback fires.
        self.beep_button.setDisabled(True)
        self.morse_button.setDisabled(True)
        self.status_label.setText('Status: Beeping...')
| gpl-2.0 |
MatthiasMuellerReineke/push-admin | bin/test_selfcontaining.py | 1 | 46288 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Copyright (C) 2011 Matthias Müller-Reineke
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""The working directory must be test_files
(otherwise these tests will fail)!"""
from imp import load_module, PY_SOURCE
from stat import S_IRUSR, S_IWUSR
from os import mkdir, stat, makedirs, symlink, chmod,\
environ
from time import sleep
from subprocess import CalledProcessError, Popen, PIPE
from os.path import join, lexists, isdir
from cStringIO import StringIO
import re
import unittest
from aslib.utilities import tunix, write, memoize, ensure_contains,\
stat_mode
from aslib.remote_exec import MatchBuffer, StdWrapper,\
CatchStdout,\
AlwaysPrintDestination, PrintDestinationForOutput, NoTty,\
communicate_with_child
from aslib.os_objects import option_with_values, User, UsersGroups,\
Packages,\
Files, Link, Directory, NoManipulation, Make, ChangeGroup,\
mkdir_command, commands_from_instances
from aslib.predefined import All, Override, Offline, El,\
non_existent, file_parts_entry,\
FileNameCollision, MakeExecutable, MakeOwnedRecursivelyBy,\
Debianish, DontTouch, hosts_with_class,\
in_rcd_initd, link_in_same_dir, dir_of_tree,\
NotSubclassOfFromExaminationOfSystem, FromExaminationOfSystem,\
call_object_attr, AttributeErrorInCallable, ClassOfSystems
from aslib import remote_exec
from aslib import predefined
# Load the test-site 'hosts' module into sys.modules before importing
# aslib.process_hosts — presumably process_hosts imports 'hosts' at
# module level and would otherwise pick up (or miss) the wrong one.
# TODO confirm against aslib.process_hosts.
hosts_file_name = join(environ['PUSH_ADMIN_DIR'],
                       'aslib/test_sites/test_files/hosts.py')
load_module('hosts', open(hosts_file_name), hosts_file_name,
            ('.py', 'r', PY_SOURCE))

from aslib import process_hosts
from aslib.process_hosts import\
        classes_from_examination_of_running_system, get_conf_attr,\
        DryRun, RealRun, options_from_runmode, ShallIProcessHost,\
        process_relevant_hosts,\
        DefaultAll,\
        classes_from_names, diff, not_printable_diff_text, mkdir_p
from aslib import test_util
from aslib.test_util import file_content, write_file, RunModeMock,\
        on_exit_vanishing_dtemp, distribution_centos, system_object_init,\
        usermod, RunModeClassDummy, TestInTmpDir

# Shared fixtures used by several test classes below.
shall_i_process_host = ShallIProcessHost()

packagemanagers_install_cmd = Packages.packagemanagers_install_cmd
class TestMatchBuffer(unittest.TestCase):
    """Tests for MatchBuffer: a stream filter that removes occurrences
    of a fixed token ('cx' here) spanning buffer_value() calls while
    passing all other characters through."""

    to_be_matched = 'cx'

    def test_returns_non_matching(self):
        val = 'b'
        self.execute_one_buffer_value(val, val)

    def test_returns_non_matching_twice(self):
        val = 'b'
        self.execute_one_buffer_value(val + self.to_be_matched + val,
                                      val * 2)

    def test_match_reset(self):
        # A partial match ('c') followed by a non-continuation must be
        # flushed, then the full token is still filtered.
        self.execute_one_buffer_value('ca' + self.to_be_matched, 'ca')

    def test_filters_matching(self):
        expected_result = 'd'
        self.execute_one_buffer_value(
            expected_result + self.to_be_matched, expected_result)

    def execute_one_buffer_value(self, value, expected_result):
        # Helper: single buffer_value() call against a fresh MatchBuffer.
        self.assertEqual(
            MatchBuffer(self.to_be_matched).buffer_value(value),
            expected_result)

    def test_two_buffer_values(self):
        # The token may be split across two calls ('...c' + 'x...').
        mb = MatchBuffer(self.to_be_matched)
        v1 = mb.buffer_value('abc')
        v2 = mb.buffer_value('xde')
        self.assertEqual(v1 + v2, 'abde')
class TestPrintDestinationForOutput(test_util.TestReplaceLibAttr):
    """print_dest() on the wrapped object must be called exactly once,
    and only when some non-empty output actually passed through the
    PrintDestinationForOutput forwarder."""

    # Module-level streams of remote_exec replaced with StringIO in setUp.
    manipulated = ('stdout', 'stderr')

    def setUp(self):
        super(TestPrintDestinationForOutput, self).setUp()
        for i in self.manipulated:
            self.manipulate_module(remote_exec, i, StringIO())

    def test_nothing_used(self):
        # No output at all -> print_dest never called.
        self.execute(tunix, 0)

    def test_stdout_used(self):
        self.execute(lambda forward:
            self.call_take_val(forward.take_stdout))

    def test_stderr_used(self):
        self.execute(lambda forward:
            self.call_take_val(forward.take_stderr))

    def call_take_val(self, take):
        # Feed one non-empty chunk through the given take_* channel.
        self.call_take(take, 'x')

    def call_take(self, take, val=''):
        take(val, NoTty())

    def execute(self, execute, expected_calls=1):
        # Minimal stand-in recording how often print_dest is invoked.
        class AllMock:
            calls = 0
            def print_dest(self):
                self.calls += 1
        am = AllMock()
        forward = PrintDestinationForOutput(am)
        execute(forward)
        # They are at least called once with an empty string
        # in productive use:
        self.call_take(forward.take_stdout)
        self.call_take(forward.take_stderr)
        self.assertEqual(am.calls, expected_calls)
class TestPackageNameMapping(unittest.TestCase):
    """The package install command must translate package names through
    the distribution's package_mapper (when one is defined) and leave
    unmapped names untouched."""

    to_be_mapped = 'to-be-mapped-for-one-distribution'
    not_to_be_mapped = 'not-to-be-mapped'
    example_packages = [to_be_mapped, not_to_be_mapped]

    def test_packages_cmd_with_map(self):
        mapped = 'this-is-mapped'
        # Distribution class that maps one of the example packages.
        class MappingRequiringDistribution(Debianish):
            package_mapper = {self.to_be_mapped: mapped}
        self.packages_cmd_map_names(
            lambda get_remote: [MappingRequiringDistribution],
            [mapped, self.not_to_be_mapped])

    def test_packages_cmd_without_map(self):
        # CentOS defines no mapper -> names pass through unchanged.
        self.packages_cmd_map_names(distribution_centos,
            self.example_packages)

    def packages_cmd_map_names(self,
            get_classes_from_examination_of_system, expected_packages):
        # Build a system object declaring the example packages, then
        # compare the generated install command line.
        class AllDerived(All):
            packages = self.example_packages
        sorted_expected_packages = sorted(expected_packages)
        s = AllDerived('')
        system_object_init(s, get_classes_from_examination_of_system)
        self.assertEqual(s.packages_cmd().all_commands(),
            [s.install_pkg_cmd
                + packagemanagers_install_cmd
                + ' '.join(sorted_expected_packages)
                + s.check_installation(sorted_expected_packages)])
class TestExecuteTestOverride(unittest.TestCase):
    """Tests for user/group handling and for Override hooks feeding the
    pre/post overlay and post-inst command phases."""

    user_name = 'user_name'
    user_add = User.cmd(user_name)
    group_name = 'group_name'

    def test_user_all_commands(self):
        self.assertEqual(User(None, self.user_name).all_commands(),
            [self.user_add])

    def test_user_create_commands(self):
        self.assertEqual(UsersGroups(None, self.user_name).create_commands(
            [self.group_name], None), [self.usermod_group()])

    def test_usermod_opt_tuple_pre_overlay_2_groups(self):
        self.usermod_opt_tuple_pre_overlay_groups(
            ['1st_group', '2nd_group'])

    def test_usermod_opt_tuple_pre_overlay_1_group(self):
        self.usermod_opt_tuple_pre_overlay_groups([self.group_name])

    def usermod_opt_tuple_pre_overlay_groups(self, groups):
        # Expect a usermod call appending the comma-joined group list.
        return self.usermod_opt_tuple_pre_overlay(groups,
            groups_options(','.join(groups)))

    def test_files_chown_in_post_overlay(self):
        file_name = 'file_name'
        self.execute_post_overlay(
            (file_name, 'file_content', Make(group=self.group_name)),
            [ChangeGroup(self.group_name)
                .create_single_command(file_name)], 'files')

    def test_usermod_opt_tuple_pre_overlay(self):
        self.execute_post_overlay([self.user_name], [self.user_add])

    def usermod_opt_tuple_pre_overlay(self, groups=[],
            additional_option=''):
        self.execute_post_overlay([self.user_name] + groups,
            [self.user_add, self.usermod(additional_option)])

    def test_users_in_pre_overlay(self):
        # A bare string (instead of a list/tuple) must also work.
        self.execute_post_overlay(self.user_name, [self.user_add])

    def test_user_with_group_in_pre_overlay(self):
        self.execute_post_overlay((self.user_name, self.group_name),
            [self.user_add, self.usermod_group()])

    def usermod_group(self):
        return self.usermod(groups_options(self.group_name))

    def usermod(self, options):
        return usermod(self.user_name, options)

    def test_user_group_order_in_pre_overlay(self):
        # Groups declared by an override must be created (groupadd)
        # before any user commands refer to them.
        class GroupCreating(Override):
            def groups(inner_self):
                return [self.group_name]
        self.execute_os_system_object('pre_overlay_commands',
            'dummy', None, ['groupadd ' + self.group_name], GroupCreating)

    def execute_post_overlay(self, return_item, expected_commands,
            result_yielding_method_name='users', *overrides):
        self.execute_os_system_object('post_overlay_commands',
            result_yielding_method_name, return_item, expected_commands,
            *overrides)

    def test_commands_in_post_inst(self):
        cmd = 'abcd xyz'
        self.execute_post_inst('commands', cmd, [cmd])

    def execute_post_inst(self, method_name, return_item, expected_commands,
            *overrides):
        self.execute_os_system_object('post_inst',
            method_name, return_item, expected_commands, *overrides)

    def execute_os_system_object(self, result_yielding_method_name,
            method_name, return_item, expected_commands, *overrides):
        # Generic driver: patch a hook onto a throwaway Override class,
        # then inspect the command list produced by the given phase.
        self.execute_test_override(method_name, return_item,
            expected_commands, lambda test_class:
                getattr(non_existent_centos_runmodemock(test_class,
                    *overrides), result_yielding_method_name)())

    def test_files_in_generated_files(self):
        test_file = ('a', 'b')
        self.execute_test_override('files', test_file, [test_file],
            lambda test_class: root_is_local(
                test_class).generated_files())

    def test_default_inst_device_override(self):
        expected = '/dev/sdxyz'
        class WeOverrideInstDevice(Override):
            def inst_device(self):
                return expected
        self.assertEqual(non_existent_centos_runmodemock(
            WeOverrideInstDevice).inst_device(), expected)

    def execute_test_override(self, method_name, return_item, expected,
            calculate_result):
        # Create an Override subclass whose hook yields [return_item]
        # and verify the computed result equals `expected`.
        class TestClass(Override):
            pass
        setattr(TestClass, method_name, lambda self: [return_item])
        self.assertEqual(calculate_result(TestClass), expected)
def groups_options(groups):
    """Return the usermod option fragment for supplementary groups
    (note the trailing space, relied on by callers)."""
    return '--append --groups ' + groups + ' '
class TestCommands(unittest.TestCase):
    """Link and Directory objects must emit creation/fix-up commands
    only when the current filesystem state differs from the target."""

    # Name-mangled to _TestCommands__name inside the methods below.
    __name = 'a'
    source = 'b'

    def setUp(self):
        self.r = RootIsLocal()
        self.remote_name = join(self.r.remote_root(), self.__name)

    def test_symlink_in_non_existing_dir(self):
        # Parent directory must be created before the link itself.
        source = self.source
        dirname = 'd'
        link = join(dirname, self.__name)
        l = Link(self.r, (self.source, link))
        self.execute(l,
            [Directory(self.r, dirname).create_single_command(None),
                l.create_single_command(None)])

    def test_existing_correctly(self):
        # Link already points to the right target -> nothing to do.
        symlink(self.source, self.remote_name)
        self.execute_link([])

    def test_wrong_existing(self):
        # Wrong target -> same commands as if it did not exist.
        symlink('wrong_source', self.remote_name)
        self.execute_non_existing()

    def test_non_existing(self):
        self.execute_non_existing()

    def execute_non_existing(self):
        self.execute_link([Link(self.r, (self.source, self.__name))
            .create_single_command(None)])

    def execute_link(self, expected):
        self.execute(Link(self.r, (self.source, self.__name)), expected)

    def test_dir(self):
        # Existing directory with wrong mode -> only a chmod is emitted.
        name = self.remote_name
        mkdir(name)
        chmod(name, S_IRUSR|S_IWUSR)
        self.execute(Directory(self.r, (self.__name, MakeExecutable)),
            ['chmod 755 /' + self.__name])

    def test_dir_str_instead_of_tuple(self):
        # A bare string must be accepted as a (name,)-style spec.
        self.execute(Directory(self.r, 'a'), [mkdir_command('a')])

    def execute(self, os_object, expected):
        self.assertEqual(os_object.commands(), expected)
class TestSimple(unittest.TestCase):
def test_make_without_mode(self):
Directory(None, ('a', Make())).all_commands()
def test_make_execute_dry(self):
# TODO: Make it possible to instantiate Make without parameter.
Make(NoManipulation.desired_mode).execute_dry(
'/non_existing_dir', None)
def test_ensure_contains(self):
data = 'd'
name = join(on_exit_vanishing_dtemp(), 'a')
ensure_contains(name, data)
ensure_contains(name, data)
self.assertEqual(file_content(name), data)
def test_write_written_data_is_immediately_available(self):
data = 'x'
name = join(on_exit_vanishing_dtemp(), 'a')
write(name, data)
self.assertEqual(file_content(name), data)
def test_overrides_are_instantiated_once_and_only_once(self):
class Once:
tally = []
def __init__(self, system_object):
self.tally.append(None)
s = All(non_existent, Once)
system_object_init(s)
s.overrides()
s.overrides()
self.assertEqual(len(Once.tally), 1)
def test_non_yet_existing_user_with_shell(self):
shell = 'test_shell'
class HasGetent:
def getent(self, db, name):
return None, name
def command_lists(method):
return [getattr(
x(HasGetent(), ('testusername', ('shell', shell))),
method)() for x in (User, UsersGroups)]
self.assertEqual(command_lists('commands'),
command_lists('all_commands'))
def test_root_is_local_files(self):
self.assertEqual(root_is_local().accumulate('files'), [])
def test_make_executable_current_mode_file_doesnt_exist(self):
default_mode = NoManipulation.desired_mode
self.assertEqual(MakeExecutable.current_mode(
"/This file doesn't exist.", default_mode),
default_mode)
def test_option_with_values(self):
self.assertEqual(option_with_values('option_name', ('value',)),
['--option_name value'])
def test_el_services_only_once(self):
class El(FromExaminationOfSystem):
def test_accumulate(self):
return [None]
s = All(non_existent)
s.init(distribution_centos, OptionsClassDummy,
lambda name: {'El': El}[name])
self.assertEqual(len(s.accumulate('test_accumulate')), 1)
def test_accumulate_accumulates_plain_value(self):
self.execute_accumulate_accumulates_from_super(2)
def test_accumulate_accumulates_from_super(self):
self.execute_accumulate_accumulates_from_super([2])
def execute_accumulate_accumulates_from_super(self, subclass_ret_val):
class Super(All):
def to_be_accumulated(self):
return [1]
class Sub(Super):
def to_be_accumulated(self):
return subclass_ret_val
s = Sub(non_existent)
system_object_init(s)
self.assertEqual(set(s.accumulate('to_be_accumulated')),
set((1, 2)))
def test_process_relevant_hosts(self):
# Setup fixture:
def raise_calledproceserror():
raise CalledProcessError(7, 'command to be mentioned')
class DummyOptionParser:
def __getattr__(self, name):
return lambda *args, **kwargs: None
def parse_args(self):
class Options:
run_mode = tunix
skip = ''
host_specification = None
return Options()
# Execution shall have stopped after exception:
self.assertRaises(SystemExit, process_relevant_hosts,
lambda args, host: True,
lambda host, args: host.action(),
DummyOptionParser(), StringIO(),
lambda distribution, runmode:
[HostDummy(raise_calledproceserror), HostDummy(tunix)])
def test_get_remote(self):
"""Reason for this test was a bug in All.get_remote where read
was accidentally called against the filename instead of the
file object."""
s = RootIsLocal()
file_name = join(s.remote_root(), 't')
self.assertEqual('', s.get_remote('t'))
def test_real_run_permission_transfer(self):
class RemoteRootLocal:
def __init__(self):
self.tmp_dir = on_exit_vanishing_dtemp()
def remote_root(self):
return self.tmp_dir
tree = on_exit_vanishing_dtemp()
f_name = 'xyz'
in_tree = join(tree, f_name)
open(in_tree, 'w').close()
mode = 0777
chmod(in_tree, mode)
s = RemoteRootLocal()
RealRun(s).rsync(tree)
self.assertEqual(stat_mode(join(s.remote_root(), f_name)), mode)
    def test_generated(self):
        # Two overrides contributing parts for the same remote file:
        # the parts are concatenated (blank-line separated) into one
        # generated file.
        remote_name = 'y'
        FP1 = file_parts_class(Override, remote_name, 'c1')
        self.assertEqual(root_is_local(FP1,
                file_parts_class(Override, remote_name, 'c2'))
            .generated_files(), [(remote_name, 'c1\n\nc2\n')])

    def test_system_object_file_parts(self):
        # file_parts defined on the system object itself (not only on
        # overrides) must show up in generated_files as well.
        remote_name = 'w'
        class HasFileParts(RootIsLocal):
            def file_parts(self):
                return [(remote_name, 'c3')]
        s = HasFileParts()
        system_object_init(s, get_distribution_classes_empty,
                           RunModeMock())
        self.assertEqual(s.generated_files(), [(remote_name, 'c3\n')])
    def test_non_existing_os_class_raises_releasenum(self):
        self.execute_non_existing_os_class_raises('6')

    def test_non_existing_os_class_raises(self):
        self.execute_non_existing_os_class_raises()

    def execute_non_existing_os_class_raises(self, *args):
        # 'SL' is not a configured distribution class -- the lookup
        # must raise KeyError, with or without a major release number.
        self.assertRaises(KeyError, get_conf_attr, 'SL', *args)
def test_is_desired_object(self):
self.for_all_distribution_objects(self.assertEqual, get_conf_attr,
lambda o, o_name: (o.__class__.__name__, o_name))
# for o in ('Ubuntu', 'Debian', 'CentOS'):
# self.assertEqual(getattr(predefined, o)(None)
# .__class__.__name__, o)
    def test_superclass_from_get_conf_attr(self):
        # Whatever class the lookup returns, the instantiated
        # distribution objects must be instances of it.
        class DesiredSuperclass(Override):
            pass
        self.for_all_distribution_objects(self.assertTrue,
            lambda name: DesiredSuperclass,
            lambda o, o_name:
            (isinstance(o, DesiredSuperclass), o_name))

    def for_all_distribution_objects(self, test_func, get_conf_attr,
                                     test_result_func):
        """Run *test_func* against an instance of each predefined
        distribution class, looked up via *get_conf_attr*."""
        for o_name in ('Ubuntu', 'Debian', 'CentOS'):
            s = RootIsLocal()
            s.init(None, OptionsClassDummy, get_conf_attr)
            test_func(*test_result_func(
                getattr(predefined, o_name)(s),
                o_name))
    def test_is_wanted_false(self):
        self.execute_test_is_wanted('Xyz', False)

    def test_is_wanted_true(self):
        self.execute_test_is_wanted('All', True)

    def execute_test_is_wanted(self, description, expected):
        """is_wanted must decide from the class names alone, without
        examining the remote system (distribution-class callback stays
        uncalled: count == 0)."""
        s = All(non_existent)
        class CountCalls:
            count = 0
            def __call__(self, system_object):
                self.count += 1
                return []
        count_calls = CountCalls()
        system_object_init(s, count_calls)
        result = bool(s.is_wanted(description))
        self.assertEqual((result, count_calls.count), (expected, 0))

    def test_override_instances_from_examination_of_system_not_ok(self):
        # A class that is not derived from FromExaminationOfSystem
        # must be rejected.
        self.assertRaises(NotSubclassOfFromExaminationOfSystem,
            self.execute_override_instances_from_examination_of_system,
            Override)

    def test_override_instances_from_examination_of_system_ok(self):
        self.execute_override_instances_from_examination_of_system(
            FromExaminationOfSystem)

    def execute_override_instances_from_examination_of_system(self,
            class_from_examination_of_system):
        s = All(non_existent)
        system_object_init(s,
            lambda get_remote: [class_from_examination_of_system])
        s.override_instances_from_examination_of_system()

    def test_dry_run_rcmd_takes_all_parameters(self):
        # NOTE(review): process_hosts.print is patched to a no-op but
        # never restored afterwards -- confirm this cannot leak into
        # other tests.
        class CommandStub:
            def output_catcher(self):
                return None
            def commands(self):
                return [None]
        setattr(process_hosts, 'print', tunix)
        DryRun(None).conditional_cmds([CommandStub()])
    def test_packages_all_subclass(self):
        # Packages declared on an All subclass end up in a single
        # 'yum install' command plus the installation check.
        test_packages = ['pkg-xyz']
        class AllDerived(RootIsLocal):
            def packages(self):
                return test_packages
        s = run_all_any_system_class(AllDerived)
        self.assertEqual(s.cmds, ['yum install -y pkg-xyz'
            + s.all_object.check_installation(test_packages)])

    def test_without_rear(self):
        # Nothing configured: no commands are issued.
        self.assertFalse(run_all_root_is_local().cmds)

    def test_services_in_post_inst(self):
        # Services from the All subclass are enabled before the
        # override's services in the post-overlay commands.
        class TestOverride(Override):
            def services(self):
                return ['service1_override', 'service2_override']
        class AllDerived(All):
            def services(self):
                return ['service1_derived', 'service2_derived']
        self.assertEqual(['chkconfig service1_derived on',
                          'chkconfig service2_derived on',
                          'chkconfig service1_override on',
                          'chkconfig service2_override on'],
            instantiate_init_noname(AllDerived, TestOverride).\
            post_overlay_commands())

    def test_services_in_links(self):
        # A link into the init.d directory is detected as a service.
        class AllDerived(All):
            links = link_in_same_dir('t', in_rcd_initd('t'))
        self.assertEqual(instantiate_init_noname(AllDerived).services(),
                         ['t'])

    def test_create_all_users_before_modifying_them(self):
        """All users need to be created first."""
        class AllDerived(All):
            users = [('user_a', 'user_as_group'),
                     ('user_b', 'user_bs_group')]
        # Extract the leading useradd/usermod verb of every generated
        # command; all useradd must come before any usermod.
        s = re.compile('(useradd|usermod) ')
        self.assertEqual([s.search(x).groups()[0] for x in sum(
            [c.all_commands() for c in
             instantiate_init_noname(AllDerived).users_cmds()], [])
            ], ['useradd', 'useradd', 'usermod', 'usermod'])
    def test_search_non_callable(self):
        # search must also find plain (non-callable) attributes.
        expected = "this isn't callable"
        class OverrideToBeFound(Override):
            this_exists = expected
        self.execute_search_existing(OverrideToBeFound, expected)

    def test_search_existing(self):
        expected = 'expected result'
        class OverrideToBeFound(Override):
            def this_exists(self):
                return expected
        self.execute_search_existing(OverrideToBeFound, expected)

    def execute_search_existing(self, cls, expected):
        """The override's attribute wins over the system class's."""
        class SystemClass(RootIsLocal):
            def this_exists(self):
                return 'wrong result'
        self.assertEqual(non_existent_centos_any_system_class(SystemClass,
            RunModeClassDummy, cls).search.this_exists, expected)

    def test_search_prefers_userdefined_overrides(self):
        # User-defined overrides shadow classes derived from the
        # examination of the running system.
        expected = 'expected result'
        class A(Override):
            this_exists = expected
        class SystemClass(FromExaminationOfSystem):
            this_exists = 'wrong result'
        s = All(non_existent, A)
        system_object_init(s, lambda get_remote: [SystemClass],
                           RunModeClassDummy)
        self.assertEqual(s.search.this_exists, expected)

    def test_search_nonexisting(self):
        self.assertRaises(AttributeError,
            lambda: non_existent_centos(RunModeClassDummy)
            .search.this_method_doesnt_exist)

    def test_call_object_attr(self):
        # An AttributeError raised *inside* the callable must not be
        # mistaken for "attribute not found" on the object itself.
        class WithCallableWhichRaises:
            def access_non_existing_attr(self):
                self.non_existing_attr
        # XXX: Unfortunately the StringIO can't be checked together with
        # the exception:
        self.assertRaises(AttributeErrorInCallable, call_object_attr,
                          WithCallableWhichRaises(),
                          'access_non_existing_attr',
                          StringIO())
    def test_derived_non_existent_centos(self):
        self.assertEqual(derived_non_existent_centos().generated_files(),
                         [derived_non_existent_centos_file])

    def test_name_in_override(self):
        # Inside an override method, self.name is the host name the
        # system object was created with.
        class OSub(Override):
            def services(self2):
                self.assertEqual(self2.name, non_existent)
                return []
        run_all_root_is_local(OSub)

    def test_diff_pdf_no_difference(self):
        self.diff_pdf('', '')

    def test_diff_pdf_difference(self):
        # For a differing PDF, only the file name plus the "not
        # printable" notice is reported, not a textual diff.
        self.diff_pdf('y',
                      self.diff_pdf_file_name + not_printable_diff_text)

    def diff_pdf(self, content_a, expected):
        # Diff content_a against an empty file under a .pdf name and
        # check the produced report.
        out = StringIO()
        diff(content_a, '', self.diff_pdf_file_name, out)
        self.assertEqual(out.getvalue(), expected)

    # Shared fixture name for the diff_pdf tests above.
    diff_pdf_file_name = 'x.pdf'
    def test_print_dest_prints_once_per_host(self):
        # A second print_dest call must not add to the output again.
        output = StringIO()
        def print_dest():
            h.print_dest()
            return output.getvalue()
        h = All(non_existent)
        h.output = output
        out1 = print_dest()
        self.assertEqual(out1, print_dest())

    def test_forward_to_standard(self):
        # StdWrapper.process with an empty read must terminate, and be
        # repeatable while terminal settings are saved/restored.
        class HasRead:
            def read(self, *args):
                return ''
        class HasPrintDest:
            def print_dest(self):
                pass
        ap = AlwaysPrintDestination(HasPrintDest())
        sw = StdWrapper(HasRead(), ap, ap.take_stdout)
        peculiarities = ap.peculiarities()
        peculiarities.save_settings()
        try:
            sw.process()
            sw.process()
        finally:
            peculiarities.reset_settings()

    def test_communicate_with_child(self):
        test_val = 'A'
        self.execute_communicate_with_child(test_val,
                                            None, CatchStdout(),
                                            'echo', '-n', test_val)

    def test_interaction_communicate_with_child(self):
        # The child blocks in 'read'; the catcher must answer on the
        # child's stdin once the expected output has arrived.
        test_val = 'xyz'
        class Interact(remote_exec.ForwardToStd):
            stdout = ''
            remotes_stdin_is_waiting = True
            def take_stdout(self, s, peculiarities):
                self.stdout += s
                if self.stdout == test_val\
                        and self.remotes_stdin_is_waiting:
                    self.remotes_stdin_is_waiting = False
                    remotes_stdin = self.p.stdin
                    remotes_stdin.write('\n')
                    remotes_stdin.flush()
        self.execute_communicate_with_child(test_val, PIPE, Interact(),
            'bash', '-c', 'echo -n {} && read'.format(test_val))

    def execute_communicate_with_child(self, test_val, remotes_stdin,
                                       output_catcher, *popen_args):
        """Run *popen_args* as a child process, let *output_catcher*
        collect its stdout and check it equals *test_val*."""
        p = Popen(popen_args, stdin=remotes_stdin,
                  stdout=output_catcher.remotes_stdout,
                  stderr=PIPE)
        output_catcher.p = p
        communicate_with_child(p,
                               output_catcher, tunix, None)
        self.assertEqual(output_catcher.stdout, test_val)
    def test_memoize(self):
        # The memoized method body runs exactly once; the second call
        # is served from the cache.
        b_ret = 13469
        class A:
            count = 0
            @memoize
            def b(self):
                self.count += 1
                return b_ret
        a = A()
        self.assertEqual((a.b(), a.b(), a.count), (b_ret, b_ret, 1))

    def test_testreplacelibattr(self):
        """manipulate_module must take effect immediately, and
        tearDown must restore the original module attribute."""
        test_val = 'value before manipulation'
        manipulation = 'test manipulation'
        class ModuleMock:
            our_test_attr_in_module = test_val
        module = ModuleMock()
        class TestReplaceLibAttr(test_util.TestReplaceLibAttr):
            def runTest(self):
                pass
        trla = TestReplaceLibAttr()
        trla.setUp()
        trla.manipulate_module(module,
                               'our_test_attr_in_module', manipulation)
        after_manipulation = module.our_test_attr_in_module
        trla.tearDown()
        self.assertEqual((test_val, manipulation),
                         (module.our_test_attr_in_module,
                          after_manipulation))
def test_shall_i_process_host_dont_touch(assert_func,
        shall_i_process_host_object):
    """Shared check: how a given shall-i-process-host implementation
    treats a host carrying the DontTouch override."""
    s = All(non_existent, DontTouch)
    s.init(get_distribution_classes_empty, OptionsClassDummy,
           lambda x: All)
    assert_func(shall_i_process_host_object('All', s))


def test_shall_i_process_host(assert_func, opt_limit,
        positive_class='CentOS'):
    """Shared check: match selection string positive_class + opt_limit
    against a mocked CentOS host."""
    assert_func(shall_i_process_host(positive_class + opt_limit,
                                     non_existent_centos_runmodemock()))
class TestRealRunStoreRemote(test_util.TestReplaceLibAttr):
    """RealRun.store_remote: writing generated files to the remote
    root."""
    def test_new_file_in_nonexisting_dir(self):
        # Intermediate directories are created on demand.
        non_existent_centos(RealRun).store_remote('a/b', 'c')

    def test_avoid_superfluous_update(self):
        """Storing identical content a second time must not write."""
        class CountCalls:
            count = 0
            def __call__(self, *args):
                self.count += 1
        def store_remote():
            s.store_remote(f_name, 'content')
        s = non_existent_centos(RealRun)
        f_name = 'f_name'
        store_remote()
        # Replace the module-level write and store the same content
        # again -- it must not be called.
        count_write_calls = CountCalls()
        self.manipulate_module(process_hosts, 'write', count_write_calls)
        store_remote()
        self.assertFalse(count_write_calls.count)

    def test_generated_files_are_stored_remote(self):
        remote_name = 'c/d/e'
        remote_content = 'store_remote test'
        self.assertEqual(run_all_root_is_local(
                file_parts_class(Override, remote_name, remote_content)
            ).stored_remote,
            [(remote_name, remote_content + '\n')])
class TestShallIProcessHost(unittest.TestCase):
    """Selecting hosts by class-name expressions ('!' marks an
    exclusion) via shall_i_process_host / ShallIProcessHost."""
    def test_default_all_dont_touch(self):
        test_shall_i_process_host_dont_touch(self.assertFalse,
                                             DefaultAll())

    def test_shall_i_process_host_count_instances(self):
        # Matching by override name must not instantiate the override
        # more than once.
        class All(predefined.All):
            instance_count = 0
        class CountsOnHostobject(Override):
            def __init__(self, system_object):
                Override.__init__(self, system_object)
                system_object.instance_count += 1
        s = All(non_existent, CountsOnHostobject)
        d = locals()
        s.init(get_distribution_classes_empty, OptionsClassDummy,
               lambda x: d[x])
        predefined.ShallIProcessHost(locals())('CountsOnHostobject', s)
        self.assertEqual(s.instance_count, 1)

    def test_shall_i_process_host_dont_touch(self):
        # shall_i_process_host/ShallIProcessHost is used by
        # distribute_command; distribute_command shall ignore DontTouch.
        test_shall_i_process_host_dont_touch(self.assertTrue,
                                             shall_i_process_host)

    def test_shall_i_process_host_centos(self):
        test_shall_i_process_host(self.assertTrue, '')

    def test_shall_i_process_host_centos_not_el(self):
        test_shall_i_process_host(self.assertFalse, '!El')

    def test_shall_i_process_host_centos_not_e(self):
        # Unlike '!El' above, '!E' presumably names no class here and
        # therefore excludes nothing -- confirm against issubclass.
        test_shall_i_process_host(self.assertTrue, '!E')

    def test_shall_i_process_host_xyz_not_e(self):
        # CentOS mustn't be processed when Debian is requested.
        test_shall_i_process_host(self.assertFalse, '!E', 'Debian')

    def test_shall_i_process_host(self):
        self.assertFalse(shall_i_process_host('a', All(non_existent)))

    def test_shall_i_process_host_el(self):
        self.assertTrue(shall_i_process_host('El',
            non_existent_centos(RunModeClassDummy)))

    def test_shall_i_process_host_centos_not_usual(self):
        test_is_wanted(self.assertFalse, '!Usual',
                       lambda s, selection, module:
                       predefined.ShallIProcessHost(module)
                       (selection, s))

    def test_is_wanted(self):
        test_is_wanted(self.assertTrue)

    def test_is_not_wanted(self):
        test_is_wanted(self.assertFalse, '!Usual')

    def test_xyz_is_not_wanted(self):
        test_is_wanted(self.assertFalse, '!Usual', distribution='Debian',
                       system_classes=())

    def test_hosts_with_class(self):
        # hosts_with_class yields the names of the hosts whose system
        # object is an instance of the named class.
        class B(All):
            pass
        name_of_system_of_desired_class = 'Desired'
        hosts = [All('a'), B(name_of_system_of_desired_class)]
        # Python 2: map() is eager and initializes every host here.
        map(system_object_init, hosts)
        self.assertEqual(hosts_with_class('B', {'hosts': hosts, 'B': B}),
                         [name_of_system_of_desired_class])
class Usual(Override):
    # Marker override referenced by name in the is_wanted tests below.
    pass
def test_is_wanted(assert_func, opt_limit='',
        call_on_s=lambda s, selection, module:
        s.is_wanted(selection),
        distribution='CentOS', system_classes=(Usual,)):
    """Shared check for is_wanted-style selection strings.

    Builds a RootIsLocal host whose configuration lookup falls back to
    the test-local Usual override, evaluates distribution + opt_limit
    via *call_on_s* and hands the result to *assert_func*.
    """
    class ModuleMock:
        # Dictionary-style access as used by the selection machinery;
        # unknown names fall back to the local Usual override.
        def __getitem__(self, name):
            try:
                return get_conf_attr(name)
            except KeyError:
                if name != 'Usual':
                    raise
                return Usual
    module_mock = ModuleMock()
    s = RootIsLocal(*system_classes)
    s.init(distribution_centos, OptionsClassDummy,
           lambda name: module_mock[name])
    # Sanity checks on the fixture before exercising the selection:
    assert s.issubclass('CentOS', ClassOfSystems)
    assert s.issubclass('Usual', ClassOfSystems)
    assert_func(call_on_s(s, distribution + opt_limit, module_mock))
def get_distribution_classes_empty(get_remote):
    """Stand-in distribution-class lookup: regardless of what
    *get_remote* could provide, report no classes at all."""
    return []
def file_parts_class(super_class, remote_name, remote_content):
    """Build a *super_class* subclass whose file_parts() yields exactly
    one (remote_name, remote_content) entry."""
    class HasFileParts(super_class):
        def file_parts(self):
            single_part = (remote_name, remote_content)
            return [single_part]
    return HasFileParts
class CreateHost:
    # Mixin supplying the host fixture for the rpmsave tests below.
    def create_host(self):
        return non_existent_centos_runmodemock()


class TestRpmsaveFromRpmsBugNoAction(test_util.TestRpmsaveFromRpmsBug,
        CreateHost):
    rpmnew_content = 'x'  # differs from file under pristine_for_rpms_bug
    def test_rpmnew_untouched(self):
        # The differing .rpmnew must be left in place ...
        self.assertEqual(
            file_content(self.execute_test() + '.rpmnew'),
            'x')
    def test_conf_file_untouched(self):
        # ... and the configuration file itself stays unchanged.
        self.assertEqual(file_content(self.execute_test()), '')


class TestRpmsaveFromRpmsBugAction(test_util.TestRpmsaveFromRpmsBugAction,
        CreateHost):
    # Inherits all test behaviour; only the host fixture is local.
    pass
class TestConfigurationInTmp(TestInTmpDir):
    """File-name collisions, service detection and tree selection,
    all running against a temporary configuration directory."""
    colliding_name = 'colliding_name'

    def test_override_dir_name_collision(self):
        class HasSameFile(Override):
            dirs = self.colliding_name
        self.run_file_name_collision(HasSameFile)

    def test_override_link_name_collision(self):
        class HasSameFile(Override):
            links = ('', self.colliding_name)
        self.run_file_name_collision(HasSameFile)

    def test_override_name_collision(self):
        class HasSameFile(Override):
            files = (self.colliding_name, '')
        self.run_file_name_collision(HasSameFile)

    def test_generated_tree_name_collision(self):
        touch(join(self.mk_all(), self.colliding_name))
        self.run_file_name_collision()

    def run_file_name_collision(self, *overrides):
        # A generated file colliding with any override-supplied name
        # must raise FileNameCollision when the trees are collected.
        s = RootIsLocal(file_parts_class(Override,
                                         self.colliding_name, ''),
                        *overrides)
        system_object_init(s, get_distribution_classes_empty)
        self.assertRaises(FileNameCollision, lambda: s.trees())

    # Fixture names for the service-detection tests below.
    test_service_name = 'test_service'
    example_service_file_name = join(All.systemd_service_dir,
                                     test_service_name
                                     + All.service_extension)

    def test_service_detection_in_generated_files(self):
        class DerivedAll(All):
            files = (self.example_service_file_name, None)
        self.assertEqual(instantiate_init_noname(DerivedAll).services(),
                         [self.test_service_name])

    def test_systemd_service_detection(self):
        self.execute_test_service_detection(self.example_service_file_name,
                                            self.test_service_name)

    def test_service_detection(self):
        test_service_name = self.test_service_name
        self.execute_test_service_detection(
            in_rcd_initd(test_service_name), test_service_name)

    def execute_test_service_detection(self, file_name, expected):
        # A file placed under the 'All' tree at file_name must be
        # recognized as service *expected*.
        self.mk_all(file_name)
        self.assertEqual(non_existent_centos_runmodemock().services(),
                         [expected])

    def mk_all(self, *dir_name):
        # Create (and return) a directory inside the 'All' tree.
        ret = join(dir_of_tree('All'), *dir_name)
        makedirs(ret)
        return ret

    def test_host_none_exclusion(self):
        dir_name = ['All!othernonexisting']
        self.run_all(dir_name, dir_name)

    def test_host_exclusion(self):
        self.run_all(['All!' + non_existent], [])

    def test_host_exclusion_all(self):
        self.run_all([non_existent + '!All'], [])

    def test_non_exclusion_of_first_item(self):
        # Only the names after '!' exclude; the first item selects.
        should_be_included = ['All!Xyz']
        self.run_all(should_be_included, should_be_included)

    def test_double_exclusion(self):
        self.run_all(['All!TestOverride!TestOverride2'], [], TestOverride2)

    def test_system_class_exclusion(self):
        self.run_all(self.available_trees_class_exclusion,
                     ['All', 'TestOverride'], TestOverride)

    def test_no_system_class_exclusion(self):
        self.run_all(self.available_trees_class_exclusion,
                     ['All', 'All!TestOverride'])

    available_trees_class_exclusion\
        = ['TestOverride', 'All', 'All!TestOverride']

    def run_all(self, available_trees, expected, *args):
        """Create *available_trees* on disk and check which of them a
        CentOS host carrying the overrides *args selects."""
        for tree in available_trees:
            makedirs(dir_of_tree(tree))
        s = RootIsLocal(*args)
        s.init(distribution_centos, options_from_runmode(RunModeMock()),
               lambda name: globals()[name])
        self.assertEqual(s.trees(), set(expected))
# Marker overrides referenced by name in the tree-selection tests.
class TestOverride(Override):
    pass


class TestOverride2(Override):
    pass


class Xyz(Override):
    pass
class TestTemplateEntry(test_util.TestReplaceLibAttr):
    def test_alternate_source(self):
        """template_entry must pass an explicit src through to
        render_template instead of deriving it from dest."""
        # Replace the renderer so it just echoes its arguments.
        self.manipulate_module(predefined, 'render_template',
                               lambda dest_file_name, **kwargs:
                               (dest_file_name, kwargs))
        dest = 'A'
        src = 'B'
        o = predefined.ClassOfSystems()
        o.search = None
        self.assertEqual(o.template_entry(dest,
            src=src), (dest, (src, {'search': None})))
class TestInTmpDir(TestInTmpDir):
    # NOTE(review): intentionally shadows the imported TestInTmpDir
    # base-class name within this module.
    def test_mkdir_p_rel_path(self):
        path = 'xyz'
        mkdir_p(path)
        self.assertTrue(isdir(path))

    def test_file_parts_entry(self):
        # Render a template from the template directory into a
        # (dest, content) file part.
        template_dir = join(self.tmp_dir, predefined.template_dir)
        mkdir(template_dir)
        write(join(template_dir, 'entry'),
              'begin {{ template_variable }} end\n')
        dest = 'file/parts/entry'
        self.assertEqual(file_parts_entry(dest,
            template_variable='variable value'),
            # Posix files end with '\n':
            (dest, 'begin variable value end\n'))

    # Fixture data for the rpmnew tests below.
    conf_content = 'a'
    conf_dir = 'etc'

    def test_rpmsave_eq_pristine_in_dir(self):
        self.assertFalse(
            self.rpmnew_exists(self.conf_content, self.conf_dir))

    def test_rpmsave_eq_pristine(self):
        self.assertFalse(self.rpmnew_exists(self.conf_content))

    def test_rpmsave_ne_pristine(self):
        self.assertTrue(self.rpmnew_exists('b'))

    def rpmnew_exists(self, rpmnew_content, directory=''):
        """True if running the rpmnew test scenario leaves an .rpmnew
        file behind for the given content/directory."""
        c = self.conf_content
        p5 = 'pristine/5'
        def mkdir_write(conf_dir):
            mkdir_p(conf_dir)
            write_file(join(conf_dir, c), c)
        mkdir_write(p5)
        mkdir_write(join(p5, self.conf_dir))
        return lexists(test_util.execute_rpmnew_test(
                non_existent_centos_runmodemock(),
                rpmnew_content, directory, 'a')
            + '.rpmnew')
class TestDoFileList(TestInTmpDir):
    def test_real_run_dry_run_do_file_list(self):
        # After RealRun has handled its list, a DryRun with appended
        # entries still reports nothing -- TODO confirm whether the
        # file-list state is shared and cleared by do_file_list.
        def append_to_file_list(run_mode):
            run_mode.append_to_file_list('zzz')
            run_mode.append_to_file_list('yyy')
        run_mode = RealRun(HasName)
        append_to_file_list(run_mode)
        run_mode.do_file_list()
        self.dry_run_do_file_list(append_to_file_list)

    def test_dry_run_do_file_list(self):
        # Nothing appended: nothing reported.
        self.dry_run_do_file_list(tunix)

    def dry_run_do_file_list(self, action_on_run_mode):
        s = StringIO()
        run_mode = DryRun(HasName)
        action_on_run_mode(run_mode)
        run_mode.do_file_list(s)
        self.assertEqual(s.getvalue(), '')


class HasName:
    # Minimal system-object stand-in: run modes only need .name here.
    name = 'n'
class TestReplaceLibAttr(test_util.TestReplaceLibAttr):
    def test_release_other5(self):
        """A distribution class looked up by name must get the
        detected release number attached."""
        class Fictional(FromExaminationOfSystem):
            pass
        def get_conf_attr_f(name, release_major_=''):
            # With a release number, serve a class carrying it; without
            # one, fall back to the plain class or the real lookup.
            if name == 'Fictional' and release_major_:
                class Fictional5(FromExaminationOfSystem):
                    release_major = release_major_
                return Fictional5
            try:
                return {'Fictional': Fictional}[name]
            except KeyError:
                return get_conf_attr(name, release_major_)
        get_conf_attr = process_hosts.get_conf_attr
        self.manipulate_module(process_hosts,
                               'get_conf_attr', get_conf_attr_f)
        self.execute_release('Fictional Linux 5', 5, 'dvd')

    # The following functions don't use the tear down mechanism:
    def test_release_5(self):
        self.execute_release('CentOS release 5.7 (Final)', 5, 'hdc')

    def test_release_6(self):
        self.execute_release('Scientific Linux release 6.1 (Carbon)',
                             6, 'dvd')

    def execute_release(self, issue, major_release, dvd_device):
        # The issue line determines release_major and the expected
        # installation-media device.
        s = ClassesFromExaminationOfRunningSystem(issue)
        system_object_init(s, classes_from_examination_of_running_system,
                           RunModeClassDummy)
        search = s.search
        self.assertEqual((search.release_major, search.inst_media_dev),
                         (str(major_release), dvd_device))

    def test_get_pretty_name(self):
        # PRETTY_NAME is extracted from an os-release style stream.
        self.assertEqual(process_hosts.get_pretty_name(StringIO(
            """NAME="Scientific Linux"
VERSION="7.1 (Nitrogen)"
ID="rhel"
ID_LIKE="fedora"
VERSION_ID="7.1"
PRETTY_NAME="Scientific Linux 7.1 (Nitrogen)"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:redhat:enterprise_linux:7.1:GA"
HOME_URL="http://www.scientificlinux.org//"
BUG_REPORT_URL="scientific-linux-devel@listserv.fnal.gov"
REDHAT_BUGZILLA_PRODUCT="Scientific Linux 7"
REDHAT_BUGZILLA_PRODUCT_VERSION=7.1
REDHAT_SUPPORT_PRODUCT="Scientific Linux"
REDHAT_SUPPORT_PRODUCT_VERSION=7.1
""")), "Scientific Linux 7.1 (Nitrogen)")

    def test_non_el(self):
        # Non-Enterprise-Linux distribution names are accepted, too.
        classes_from_names(['Debian'])
def instantiate_init_noname(klass, *overrides):
    # Create and initialize a system object with an empty host name.
    s = klass('', *overrides)
    system_object_init(s)
    return s


# Fixture data for derived_non_existent_centos below.
derived_non_existent_centos_file_name = 'a'
derived_non_existent_centos_file = (derived_non_existent_centos_file_name,
                                    'd')


def derived_non_existent_centos(*args):
    """Host whose detected CentOS class contributes one file."""
    s = RootIsLocal(*args)
    def derived_centos(get_remote):
        class CentOS(FromExaminationOfSystem):
            def files(self):
                return [derived_non_existent_centos_file]
        return [CentOS]
    system_object_init(s, derived_centos)
    return s
def non_existent_centos_runmodemock(*args):
    # CentOS host fixture with a mocked run mode.
    return non_existent_centos(RunModeMock(), *args)


def non_existent_centos(run_mode, *args):
    return non_existent_centos_any_system_class(RootIsLocal,
                                                run_mode, *args)


def non_existent_centos_any_system_class(system_class, run_mode, *args):
    # RootIsLocal instead of All avoids Offline Exceptions:
    s = system_class(*args)
    system_object_init(s, distribution_centos, run_mode)
    s.release_major = '5'  # according to
    # aslib/test_sites/test_files/pristine/5
    return s


def root_is_local(*args):
    # The system object of the (system object, run mode) pair.
    return root_is_local_sm(*args)[0]


def run_all_root_is_local(*args):
    # The run-mode mock of the (system object, run mode) pair.
    return root_is_local_sm(*args)[1]


def root_is_local_sm(*args):
    """Build a RootIsLocal host with a mocked run mode, run rsync once
    and hand back (system object, run-mode mock)."""
    m = RunModeMock()
    s = RootIsLocal(*args)
    system_object_init(s, get_distribution_classes_empty, m)
    s.rsync()
    return (s, m)


def run_all_any_system_class(system_class, *args):
    # Like root_is_local_sm, but for an arbitrary system class with the
    # CentOS fixture; returns the run-mode mock.
    m = RunModeMock()
    s = non_existent_centos_any_system_class(system_class, m, *args)
    s.rsync()
    return m
class RootIsLocal(All):
    """All variant whose 'remote' root is a local temporary directory,
    so tests never have to reach a real host."""
    def __init__(self, *args):
        All.__init__(self, non_existent, *args)
        self.tmp_dir = on_exit_vanishing_dtemp()

    def remote_root(self):
        return self.tmp_dir

    def query_installed(self, names):
        # Pretend none of the requested packages is installed.
        return []


class ClassesFromExaminationOfRunningSystem(RootIsLocal):
    """RootIsLocal that serves a canned issue text instead of reading
    anything from a remote system."""
    def __init__(self, issue, *args):
        RootIsLocal.__init__(self, *args)
        self.__issue = issue

    def get_remote(self, src_file):
        # Every remote read yields the canned issue text.
        return self.__issue

    def remote_dtemp(self):
        return self.tmp_dir

    def ssh_silent_stdout(self, *args, **kwargs):
        # Hardware detection always reports a physical machine.
        return 'Physical'
class TestNonExistentAll(unittest.TestCase):
    """Behaviour against a host that cannot be reached."""
    def setUp(self):
        s = All(non_existent)
        system_object_init(s, classes_from_examination_of_running_system,
                           RunModeClassDummy)
        s.filter_masters_stderr = list  # reads stderr of master_openssh
        self.host = s

    def test_connection_failure(self):
        # Examining an unreachable system must raise Offline.
        self.assertRaises(Offline,
            self.host.override_instances_from_examination_of_system)

    def test_connection_failure_direct_ssh(self):
        self.assertRaises(Offline, self.host.ssh, 'true')

    def test_shall_i_process_host(self):
        # Selection by host name works even for unreachable hosts.
        self.assertTrue(shall_i_process_host(non_existent, self.host))
class HostDummy:
    """Minimal host stand-in: no name, plus one callable that is
    invoked in place of real host processing."""
    name = None

    def __init__(self, action):
        self.action = action
class OptionsClassDummy:  # simulate options object
    @classmethod
    def run_mode(cls, all_object):
        # Deliberately yields None -- stands in for a parsed
        # run-mode option.
        pass
    # PDF diffing degenerates to a no-op in tests.
    diffpdf = tunix
def touch(file_name):
    """Create *file_name* as an empty file (truncating it if it
    already exists)."""
    with open(file_name, 'w'):
        pass
unittest.main()
| gpl-3.0 |
urba1n/fabric-bolt | src/fabric_bolt/accounts/tables.py | 14 | 1653 | """
Tables for the account app
"""
from django.contrib.auth import get_user_model
import django_tables2 as tables
from fabric_bolt.core.mixins.tables import ActionsColumn, PaginateTable
class UserListTable(PaginateTable):
    """
    Table for displaying users.

    Columns: first/last name, active flag, email and a derived user
    level, plus per-row view/edit/delete action links.
    """
    # Per-row action icons linking to the user view/change/delete URLs;
    # each receives the row's pk as URL argument.
    actions = ActionsColumn([
        {'title': '<i class="glyphicon glyphicon-file"></i>', 'url': 'accounts_user_view', 'args': [tables.A('pk')],
         'attrs':{'data-toggle': 'tooltip', 'title': 'View User', 'data-delay': '{ "show": 300, "hide": 0 }'}},
        {'title': '<i class="glyphicon glyphicon-pencil"></i>', 'url': 'accounts_user_change', 'args': [tables.A('pk')],
         'attrs':{'data-toggle': 'tooltip', 'title': 'Edit User', 'data-delay': '{ "show": 300, "hide": 0 }'}},
        {'title': '<i class="glyphicon glyphicon-trash"></i>', 'url': 'accounts_user_delete', 'args': [tables.A('pk')],
         'attrs':{'data-toggle': 'tooltip', 'title': 'Delete User', 'data-delay': '{ "show": 300, "hide": 0 }', 'class': 'js-delete'}},
    ], delimiter='   ')

    email = tables.Column(verbose_name='Email')
    first_name = tables.Column(verbose_name='First Name')
    last_name = tables.Column(verbose_name='Last Name')
    # 'group_strigify' [sic] is a model accessor; sorting falls back to
    # the groups relation.
    user_level = tables.Column(verbose_name='User Level', accessor='group_strigify', order_by='groups')

    class Meta:
        model = get_user_model()
        sequence = fields = ('first_name', 'last_name', 'is_active', 'email', 'user_level', )
        attrs = {'class': 'table table-striped table-bordered table-hover'}

    def __init__(self, *args, **kwargs):
        super(UserListTable, self).__init__(*args, **kwargs)
| mit |
ProteinDF/ProteinDF_bridge | tests/test_atom.py | 1 | 2087 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2014 The ProteinDF project
# see also AUTHORS and README.
#
# This file is part of ProteinDF.
#
# ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
import unittest
import pickle
import doctest
from proteindf_bridge.atom import Atom
class AtomTests(unittest.TestCase):
    """Construction, coordinate handling and pickling of Atom."""
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        # Setting the symbol after construction updates atomic_number.
        atom = Atom()
        atom.symbol = 'Fe'
        self.assertEqual(atom.atomic_number, 26)

    def test_init2(self):
        # The position may be given as a sequence of floats.
        atom = Atom(symbol='Ni', position=[0.0, 1.0, 2.0])
        self.assertEqual(atom.atomic_number, 28)
        self.assertAlmostEqual(atom.xyz[0], 0.0)
        self.assertAlmostEqual(atom.xyz[1], 1.0)
        self.assertAlmostEqual(atom.xyz[2], 2.0)

    def test_init3(self):
        # xyz also accepts a whitespace-separated coordinate string.
        atom = Atom(symbol='C', xyz="0.0 1.0 2.0")
        self.assertEqual(atom.atomic_number, 6)
        self.assertAlmostEqual(atom.xyz[0], 0.0)
        self.assertAlmostEqual(atom.xyz[1], 1.0)
        self.assertAlmostEqual(atom.xyz[2], 2.0)

    def test_pickle(self):
        # An Atom must survive a pickle round-trip unchanged.
        atom1 = Atom()
        atom1.symbol = 'Na'
        b = pickle.dumps(atom1)
        atom2 = pickle.loads(b)
        self.assertIsInstance(atom2, Atom)
        self.assertEqual(atom2.symbol, 'Na')
def load_tests(loader, tests, ignore):
    """unittest load_tests hook: add the atom module's doctests to the
    discovered suite."""
    from proteindf_bridge import atom
    tests.addTests(doctest.DocTestSuite(atom))
    return tests


if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.