repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (string, 990 classes) | size (string, 4–7 chars) | content (string, 666 chars–1M) | license (string, 15 classes) |
|---|---|---|---|---|---|
eti1337/arc-theme-orange | common/cinnamon/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
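# Illustrative only (example values are invented): a gyp target string
# has the form "build_file:target#toolset", so ParseTarget behaves
# roughly like:
#
#   >>> ParseTarget('out/foo.gyp:mylib#host')
#   ('out/foo.gyp', 'mylib', 'host')
#   >>> ParseTarget('out/foo.gyp:mylib')
#   ('out/foo.gyp', 'mylib', '')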
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their dependents."""
f = open(filename)
edges = json.load(f)
f.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
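# A hedged sketch of the dump file shape LoadEdges expects: a JSON
# object mapping each fully qualified target to the list of targets it
# depends on. The target names below are invented for illustration:
#
#   {
#     "out/foo.gyp:mylib#host": ["out/bar.gyp:base#host"],
#     "out/bar.gyp:base#host": []
#   }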
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
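# Example invocation (a sketch; the generator flag and dot options are
# assumptions -- check your gyp and graphviz versions):
#
#   gyp --format=dump_dependency_json myproject.gyp  # writes dump.json
#   python graphviz.py out/foo.gyp:mylib | dot -Tpng -o deps.png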
| gpl-3.0 |
faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Action.py | 34 | 46855 | """SCons.Action
This encapsulates information about executing any sort of action that
can build one or more target Nodes (typically files) from one or more
source Nodes (also typically files) given a specific Environment.
The base class here is ActionBase. The base class supplies just a few
OO utility methods and some generic methods for displaying information
about an Action in response to the various commands that control printing.
A second-level base class is _ActionAction. This extends ActionBase
by providing the methods that can be used to show and perform an
action. True Action objects will subclass _ActionAction; Action
factory class objects will subclass ActionBase.
The heavy lifting is handled by subclasses for the different types of
actions we might execute:
CommandAction
CommandGeneratorAction
FunctionAction
ListAction
The subclasses supply the following public interface methods used by
other modules:
__call__()
THE public interface, "calling" an Action object executes the
command or Python function. This also takes care of printing
a pre-substitution command for debugging purposes.
get_contents()
Fetches the "contents" of an Action for signature calculation
plus the varlist. This is what gets MD5 checksummed to decide
if a target needs to be rebuilt because its action changed.
genstring()
Returns a string representation of the Action *without*
command substitution, but allows a CommandGeneratorAction to
generate the right action based on the specified target,
source and env. This is used by the Signature subsystem
(through the Executor) to obtain an (imprecise) representation
of the Action operation for informative purposes.
Subclasses also supply the following methods for internal use within
this module:
__str__()
Returns a string approximation of the Action; no variable
substitution is performed.
execute()
The internal method that really, truly, actually handles the
execution of a command or Python function. This is used so
that the __call__() methods can take care of displaying any
pre-substitution representations, and *then* execute an action
without worrying about the specific Actions involved.
get_presig()
Fetches the "contents" of a subclass for signature calculation.
The varlist is added to this to produce the Action's contents.
strfunction()
Returns a substituted string representation of the Action.
This is used by the _ActionAction.show() command to display the
command/function that will be executed to generate the target(s).
There is a related independent ActionCaller class that looks like a
regular Action, and which serves as a wrapper for arbitrary functions
that we want to let the user specify the arguments to now, but actually
execute later (when an out-of-date check determines that it needs to
be executed, for example). Objects of this class are returned by an
ActionFactory class that provides a __call__() method as a convenient
way for wrapping up the functions.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Action.py 5023 2010/06/14 22:05:46 scons"
import SCons.compat
import dis
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import re
import sys
import subprocess
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Executor
import SCons.Util
import SCons.Subst
# we use these a lot, so try to optimize them
is_String = SCons.Util.is_String
is_List = SCons.Util.is_List
class _null(object):
pass
print_actions = 1
execute_actions = 1
print_actions_presub = 0
def rfile(n):
try:
return n.rfile()
except AttributeError:
return n
def default_exitstatfunc(s):
return s
try:
SET_LINENO = dis.SET_LINENO
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
except AttributeError:
remove_set_lineno_codes = lambda x: x
else:
def remove_set_lineno_codes(code):
result = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if op >= HAVE_ARGUMENT:
if op != SET_LINENO:
result.append(code[i:i+3])
i = i+3
else:
result.append(c)
i = i+1
return ''.join(result)
strip_quotes = re.compile('^[\'"](.*)[\'"]$')
def _callable_contents(obj):
"""Return the signature contents of a callable Python object.
"""
try:
# Test if obj is a method.
return _function_contents(obj.im_func)
except AttributeError:
try:
# Test if obj is a callable object.
return _function_contents(obj.__call__.im_func)
except AttributeError:
try:
# Test if obj is a code object.
return _code_contents(obj)
except AttributeError:
# Test if obj is a function object.
return _function_contents(obj)
def _object_contents(obj):
"""Return the signature contents of any Python object.
We have to handle the case where object contains a code object
since it cannot be pickled directly.
"""
try:
# Test if obj is a method.
return _function_contents(obj.im_func)
except AttributeError:
try:
# Test if obj is a callable object.
return _function_contents(obj.__call__.im_func)
except AttributeError:
try:
# Test if obj is a code object.
return _code_contents(obj)
except AttributeError:
try:
# Test if obj is a function object.
return _function_contents(obj)
except AttributeError:
# Should be a picklable Python object.
try:
return pickle.dumps(obj)
except (pickle.PicklingError, TypeError):
# This is weird, but it seems that nested classes
# are unpicklable. The Python docs say it should
# always be a PicklingError, but some Python
# versions seem to return TypeError. Just do
# the best we can.
return str(obj)
def _code_contents(code):
"""Return the signature contents of a code object.
By providing direct access to the code object of the
function, Python makes this extremely easy. Hooray!
Unfortunately, older versions of Python include line
number indications in the compiled byte code. Boo!
So we remove the line number byte codes so that merely moving
a Python function does not trigger recompilation.
"""
contents = []
# The code contents depend on the number of local variables
# but not their actual names.
contents.append("%s,%s" % (code.co_argcount, len(code.co_varnames)))
try:
contents.append(",%s,%s" % (len(code.co_cellvars), len(code.co_freevars)))
except AttributeError:
# Older versions of Python do not support closures.
contents.append(",0,0")
# The code contents depend on any constants accessed by the
# function. Note that we have to call _object_contents on each
# constant because the code object of nested functions can
# show up among the constants.
#
# Note that we also always ignore the first entry of co_consts
# which contains the function doc string. We assume that the
# function does not access its doc string.
contents.append(',(' + ','.join(map(_object_contents,code.co_consts[1:])) + ')')
# The code contents depend on the variable names used to
# access global variables, as changing a variable name changes
# the variable actually accessed and therefore changes the
# function result.
contents.append(',(' + ','.join(map(_object_contents,code.co_names)) + ')')
# The code contents depend on the actual byte code!!!
contents.append(',(' + str(remove_set_lineno_codes(code.co_code)) + ')')
return ''.join(contents)
def _function_contents(func):
"""Return the signature contents of a function."""
contents = [_code_contents(func.func_code)]
# The function contents depend on the values of default arguments
if func.func_defaults:
contents.append(',(' + ','.join(map(_object_contents,func.func_defaults)) + ')')
else:
contents.append(',()')
# The function contents depend on the closure's captured cell values.
try:
closure = func.func_closure or []
except AttributeError:
# Older versions of Python do not support closures.
closure = []
#xxx = [_object_contents(x.cell_contents) for x in closure]
try:
xxx = [_object_contents(x.cell_contents) for x in closure]
except AttributeError:
xxx = []
contents.append(',(' + ','.join(xxx) + ')')
return ''.join(contents)
def _actionAppend(act1, act2):
# This function knows how to slap two actions together.
# Mainly, it handles ListActions by concatenating into
# a single ListAction.
a1 = Action(act1)
a2 = Action(act2)
if a1 is None or a2 is None:
raise TypeError("Cannot append %s to %s" % (type(act1), type(act2)))
if isinstance(a1, ListAction):
if isinstance(a2, ListAction):
return ListAction(a1.list + a2.list)
else:
return ListAction(a1.list + [ a2 ])
else:
if isinstance(a2, ListAction):
return ListAction([ a1 ] + a2.list)
else:
return ListAction([ a1, a2 ])
def _do_create_keywords(args, kw):
"""This converts any arguments after the action argument into
their equivalent keywords and adds them to the kw argument.
"""
v = kw.get('varlist', ())
# prevent varlist="FOO" from being interpreted as ['F', 'O', 'O']
if is_String(v): v = (v,)
kw['varlist'] = tuple(v)
if args:
# turn positional args into equivalent keywords
cmdstrfunc = args[0]
if cmdstrfunc is None or is_String(cmdstrfunc):
kw['cmdstr'] = cmdstrfunc
elif callable(cmdstrfunc):
kw['strfunction'] = cmdstrfunc
else:
raise SCons.Errors.UserError(
'Invalid command display variable type. '
'You must either pass a string or a callback which '
'accepts (target, source, env) as parameters.')
if len(args) > 1:
kw['varlist'] = args[1:] + kw['varlist']
if kw.get('strfunction', _null) is not _null \
and kw.get('cmdstr', _null) is not _null:
raise SCons.Errors.UserError(
'Cannot have both strfunction and cmdstr args to Action()')
def _do_create_action(act, kw):
"""This is the actual "implementation" for the
Action factory method, below. This handles the
fact that passing lists to Action() itself has
different semantics than passing lists as elements
of lists.
The former will create a ListAction, the latter
will create a CommandAction by converting the inner
list elements to strings."""
if isinstance(act, ActionBase):
return act
if is_List(act):
return CommandAction(act, **kw)
if callable(act):
try:
gen = kw['generator']
del kw['generator']
except KeyError:
gen = 0
if gen:
action_type = CommandGeneratorAction
else:
action_type = FunctionAction
return action_type(act, kw)
if is_String(act):
var=SCons.Util.get_environment_var(act)
if var:
# This looks like a string that is purely an Environment
# variable reference, like "$FOO" or "${FOO}". We do
# something special here...we lazily evaluate the contents
# of that Environment variable, so a user could put something
# like a function or a CommandGenerator in that variable
# instead of a string.
return LazyAction(var, kw)
commands = str(act).split('\n')
if len(commands) == 1:
return CommandAction(commands[0], **kw)
# The list of string commands may include a LazyAction, so we
# reprocess them via _do_create_list_action.
return _do_create_list_action(commands, kw)
return None
def _do_create_list_action(act, kw):
"""A factory for list actions. Convert the input list into Actions
and then wrap them in a ListAction."""
acts = []
for a in act:
aa = _do_create_action(a, kw)
if aa is not None: acts.append(aa)
if not acts:
return ListAction([])
elif len(acts) == 1:
return acts[0]
else:
return ListAction(acts)
def Action(act, *args, **kw):
"""A factory for action objects."""
# Really simple: the _do_create_* routines do the heavy lifting.
_do_create_keywords(args, kw)
if is_List(act):
return _do_create_list_action(act, kw)
return _do_create_action(act, kw)
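# A hedged summary of the dispatch rules implemented above (command
# names are illustrative):
#
#   Action('$CC -c -o $TARGET $SOURCE')  # string        -> CommandAction
#   Action('mkdir foo\ntouch foo/bar')   # multi-line    -> ListAction of CommandActions
#   Action(['cmd1', 'cmd2'])             # outer list    -> ListAction
#   Action([['echo', 'hi']])             # inner list    -> CommandAction built from words
#   Action(some_callable)                # callable      -> FunctionAction
#   Action('$MYCOMMAND')                 # pure $VAR ref -> LazyAction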
class ActionBase(object):
"""Base class for all types of action objects that can be held by
other objects (Builders, Executors, etc.) This provides the
common methods for manipulating and combining those actions."""
def __cmp__(self, other):
return cmp(self.__dict__, other)
def no_batch_key(self, env, target, source):
return None
batch_key = no_batch_key
def genstring(self, target, source, env):
return str(self)
def get_contents(self, target, source, env):
result = [ self.get_presig(target, source, env) ]
# This should never happen, as the Action() factory should wrap
# the varlist, but just in case an action is created directly,
# we duplicate this check here.
vl = self.get_varlist(target, source, env)
if is_String(vl): vl = (vl,)
for v in vl:
result.append(env.subst('${'+v+'}'))
return ''.join(result)
def __add__(self, other):
return _actionAppend(self, other)
def __radd__(self, other):
return _actionAppend(other, self)
def presub_lines(self, env):
# CommandGeneratorAction needs a real environment
# in order to return the proper string here, since
# it may call LazyAction, which looks up a key
# in that env. So we temporarily remember the env here,
# and CommandGeneratorAction will use this env
# when it calls its _generate method.
self.presub_env = env
lines = str(self).split('\n')
self.presub_env = None # don't need this any more
return lines
def get_varlist(self, target, source, env, executor=None):
return self.varlist
def get_targets(self, env, executor):
"""
Returns the type of targets ($TARGETS, $CHANGED_TARGETS) used
by this action.
"""
return self.targets
class _ActionAction(ActionBase):
"""Base class for actions that create output objects."""
def __init__(self, cmdstr=_null, strfunction=_null, varlist=(),
presub=_null, chdir=None, exitstatfunc=None,
batch_key=None, targets='$TARGETS',
**kw):
self.cmdstr = cmdstr
if strfunction is not _null:
if strfunction is None:
self.cmdstr = None
else:
self.strfunction = strfunction
self.varlist = varlist
self.presub = presub
self.chdir = chdir
if not exitstatfunc:
exitstatfunc = default_exitstatfunc
self.exitstatfunc = exitstatfunc
self.targets = targets
if batch_key:
if not callable(batch_key):
# They have set batch_key, but not to their own
# callable. The default behavior here will batch
# *all* targets+sources using this action, separated
# for each construction environment.
def default_batch_key(self, env, target, source):
return (id(self), id(env))
batch_key = default_batch_key
SCons.Util.AddMethod(self, batch_key, 'batch_key')
def print_cmd_line(self, s, target, source, env):
sys.stdout.write(s + u"\n")
def __call__(self, target, source, env,
exitstatfunc=_null,
presub=_null,
show=_null,
execute=_null,
chdir=_null,
executor=None):
if not is_List(target):
target = [target]
if not is_List(source):
source = [source]
if presub is _null:
presub = self.presub
if presub is _null:
presub = print_actions_presub
if exitstatfunc is _null: exitstatfunc = self.exitstatfunc
if show is _null: show = print_actions
if execute is _null: execute = execute_actions
if chdir is _null: chdir = self.chdir
save_cwd = None
if chdir:
save_cwd = os.getcwd()
try:
chdir = str(chdir.abspath)
except AttributeError:
if not is_String(chdir):
if executor:
chdir = str(executor.batches[0].targets[0].dir)
else:
chdir = str(target[0].dir)
if presub:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
t = ' and '.join(map(str, target))
l = '\n '.join(self.presub_lines(env))
out = u"Building %s with action:\n %s\n" % (t, l)
sys.stdout.write(out)
cmd = None
if show and self.strfunction:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
try:
cmd = self.strfunction(target, source, env, executor)
except TypeError:
cmd = self.strfunction(target, source, env)
if cmd:
if chdir:
cmd = ('os.chdir(%s)\n' % repr(chdir)) + cmd
try:
get = env.get
except AttributeError:
print_func = self.print_cmd_line
else:
print_func = get('PRINT_CMD_LINE_FUNC')
if not print_func:
print_func = self.print_cmd_line
print_func(cmd, target, source, env)
stat = 0
if execute:
if chdir:
os.chdir(chdir)
try:
stat = self.execute(target, source, env, executor=executor)
if isinstance(stat, SCons.Errors.BuildError):
s = exitstatfunc(stat.status)
if s:
stat.status = s
else:
stat = s
else:
stat = exitstatfunc(stat)
finally:
if save_cwd:
os.chdir(save_cwd)
if cmd and save_cwd:
print_func('os.chdir(%s)' % repr(save_cwd), target, source, env)
return stat
def _string_from_cmd_list(cmd_list):
"""Takes a list of command line arguments and returns a pretty
representation for printing."""
cl = []
for arg in map(str, cmd_list):
if ' ' in arg or '\t' in arg:
arg = '"' + arg + '"'
cl.append(arg)
return ' '.join(cl)
# A fiddlin' little function that has an 'import SCons.Environment' which
# can't be moved to the top level without creating an import loop. Since
# this import creates a local variable named 'SCons', it blocks access to
# the global variable, so we move it here to prevent complaints about local
# variables being used uninitialized.
default_ENV = None
def get_default_ENV(env):
global default_ENV
try:
return env['ENV']
except KeyError:
if not default_ENV:
import SCons.Environment
# This is a hideously expensive way to get a default shell
# environment. What it really should do is run the platform
# setup to get the default ENV. Fortunately, it's incredibly
# rare for an Environment not to have a shell environment, so
# we're not going to worry about it overmuch.
default_ENV = SCons.Environment.Environment()['ENV']
return default_ENV
# This function is still in draft mode. We're going to need something like
# it in the long run as more and more places use subprocess, but I'm sure
# it'll have to be tweaked to get the full desired functionality.
# one special arg (so far?), 'error', to tell what to do with exceptions.
def _subproc(scons_env, cmd, error = 'ignore', **kw):
"""Do common setup for a subprocess.Popen() call"""
# allow std{in,out,err} to be the string 'devnull'
io = kw.get('stdin')
if is_String(io) and io == 'devnull':
kw['stdin'] = open(os.devnull)
io = kw.get('stdout')
if is_String(io) and io == 'devnull':
kw['stdout'] = open(os.devnull, 'w')
io = kw.get('stderr')
if is_String(io) and io == 'devnull':
kw['stderr'] = open(os.devnull, 'w')
# Figure out what shell environment to use
ENV = kw.get('env', None)
if ENV is None: ENV = get_default_ENV(scons_env)
# Ensure that the ENV values are all strings:
new_env = {}
for key, value in ENV.items():
if is_List(value):
# If the value is a list, then we assume it is a path list,
# because that's a pretty common list-like value to stick
# in an environment variable:
value = SCons.Util.flatten_sequence(value)
new_env[key] = os.pathsep.join(map(str, value))
else:
# It's either a string or something else. If it's a string,
# we still want to call str() because it might be a *Unicode*
# string, which makes subprocess.Popen() gag. If it isn't a
# string or a list, then we just coerce it to a string, which
# is the proper way to handle Dir and File instances and will
# produce something reasonable for just about everything else:
new_env[key] = str(value)
kw['env'] = new_env
try:
#FUTURE return subprocess.Popen(cmd, **kw)
return subprocess.Popen(cmd, **kw)
except EnvironmentError, e:
if error == 'raise': raise
# return a dummy Popen instance that only returns error
class dummyPopen(object):
def __init__(self, e): self.exception = e
def communicate(self): return ('','')
def wait(self): return -self.exception.errno
stdin = None
class f(object):
def read(self): return ''
def readline(self): return ''
stdout = stderr = f()
return dummyPopen(e)
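# A hedged example of using the helper above (the command is made up;
# 'devnull' and error='raise' are the conventions defined here):
#
#   p = _subproc(scons_env, ['cc', '--version'],
#                error='raise', stdin='devnull', stdout=subprocess.PIPE)
#   out, err = p.communicate()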
class CommandAction(_ActionAction):
"""Class for command-execution actions."""
def __init__(self, cmd, **kw):
# Cmd can actually be a list or a single item; if it's a
# single item it should be the command string to execute; if a
# list then it should be the words of the command string to
# execute. Only a single command should be executed by this
# object; lists of commands should be handled by embedding
# these objects in a ListAction object (which the Action()
# factory above does). cmd will be passed to
# Environment.subst_list() for substituting environment
# variables.
if __debug__: logInstanceCreation(self, 'Action.CommandAction')
_ActionAction.__init__(self, **kw)
if is_List(cmd):
if list(filter(is_List, cmd)):
raise TypeError("CommandAction should be given only " \
"a single command")
self.cmd_list = cmd
def __str__(self):
if is_List(self.cmd_list):
return ' '.join(map(str, self.cmd_list))
return str(self.cmd_list)
def process(self, target, source, env, executor=None):
if executor:
result = env.subst_list(self.cmd_list, 0, executor=executor)
else:
result = env.subst_list(self.cmd_list, 0, target, source)
silent = None
ignore = None
while True:
try: c = result[0][0][0]
except IndexError: c = None
if c == '@': silent = 1
elif c == '-': ignore = 1
else: break
result[0][0] = result[0][0][1:]
try:
if not result[0][0]:
result[0] = result[0][1:]
except IndexError:
pass
return result, ignore, silent
def strfunction(self, target, source, env, executor=None):
if self.cmdstr is None:
return None
if self.cmdstr is not _null:
from SCons.Subst import SUBST_RAW
if executor:
c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
else:
c = env.subst(self.cmdstr, SUBST_RAW, target, source)
if c:
return c
cmd_list, ignore, silent = self.process(target, source, env, executor)
if silent:
return ''
return _string_from_cmd_list(cmd_list[0])
def execute(self, target, source, env, executor=None):
"""Execute a command action.
This will handle lists of commands as well as individual commands,
because construction variable substitution may turn a single
"command" into a list. This means that this class can actually
handle lists of commands, even though that's not how we use it
externally.
"""
escape_list = SCons.Subst.escape_list
flatten_sequence = SCons.Util.flatten_sequence
try:
shell = env['SHELL']
except KeyError:
raise SCons.Errors.UserError('Missing SHELL construction variable.')
try:
spawn = env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
else:
if is_String(spawn):
spawn = env.subst(spawn, raw=1, conv=lambda x: x)
escape = env.get('ESCAPE', lambda x: x)
ENV = get_default_ENV(env)
# Ensure that the ENV values are all strings:
for key, value in ENV.items():
if not is_String(value):
if is_List(value):
# If the value is a list, then we assume it is a
# path list, because that's a pretty common list-like
# value to stick in an environment variable:
value = flatten_sequence(value)
ENV[key] = os.pathsep.join(map(str, value))
else:
# If it isn't a string or a list, then we just coerce
# it to a string, which is the proper way to handle
# Dir and File instances and will produce something
# reasonable for just about everything else:
ENV[key] = str(value)
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
cmd_list, ignore, silent = self.process(target, list(map(rfile, source)), env, executor)
# Use len() to filter out any "command" that's zero-length.
for cmd_line in filter(len, cmd_list):
# Escape the command line for the interpreter we are using.
cmd_line = escape_list(cmd_line, escape)
result = spawn(shell, escape, cmd_line[0], cmd_line, ENV)
if not ignore and result:
msg = "Error %s" % result
return SCons.Errors.BuildError(errstr=msg,
status=result,
action=self,
command=cmd_line)
return 0
def get_presig(self, target, source, env, executor=None):
"""Return the signature contents of this action's command line.
This strips $( and $) and everything in between,
since those parts don't affect signatures.
"""
from SCons.Subst import SUBST_SIG
cmd = self.cmd_list
if is_List(cmd):
cmd = ' '.join(map(str, cmd))
else:
cmd = str(cmd)
if executor:
return env.subst_target_source(cmd, SUBST_SIG, executor=executor)
else:
return env.subst_target_source(cmd, SUBST_SIG, target, source)
def get_implicit_deps(self, target, source, env, executor=None):
icd = env.get('IMPLICIT_COMMAND_DEPENDENCIES', True)
if is_String(icd) and icd[:1] == '$':
icd = env.subst(icd)
if not icd or icd in ('0', 'None'):
return []
from SCons.Subst import SUBST_SIG
if executor:
cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, executor=executor)
else:
cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, target, source)
res = []
for cmd_line in cmd_list:
if cmd_line:
d = str(cmd_line[0])
m = strip_quotes.match(d)
if m:
d = m.group(1)
d = env.WhereIs(d)
if d:
res.append(env.fs.File(d))
return res
class CommandGeneratorAction(ActionBase):
"""Class for command-generator actions."""
def __init__(self, generator, kw):
if __debug__: logInstanceCreation(self, 'Action.CommandGeneratorAction')
self.generator = generator
self.gen_kw = kw
self.varlist = kw.get('varlist', ())
self.targets = kw.get('targets', '$TARGETS')
def _generate(self, target, source, env, for_signature, executor=None):
# ensure that target is a list, to make it easier to write
# generator functions:
if not is_List(target):
target = [target]
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
ret = self.generator(target=target,
source=source,
env=env,
for_signature=for_signature)
gen_cmd = Action(ret, **self.gen_kw)
if not gen_cmd:
raise SCons.Errors.UserError("Object returned from command generator: %s cannot be used to create an Action." % repr(ret))
return gen_cmd
def __str__(self):
try:
env = self.presub_env
except AttributeError:
env = None
if env is None:
env = SCons.Defaults.DefaultEnvironment()
act = self._generate([], [], env, 1)
return str(act)
def batch_key(self, env, target, source):
return self._generate(target, source, env, 1).batch_key(env, target, source)
def genstring(self, target, source, env, executor=None):
return self._generate(target, source, env, 1, executor).genstring(target, source, env)
def __call__(self, target, source, env, exitstatfunc=_null, presub=_null,
show=_null, execute=_null, chdir=_null, executor=None):
act = self._generate(target, source, env, 0, executor)
if act is None:
raise SCons.Errors.UserError("While building `%s': "
"Cannot deduce file extension from source files: %s"
% (repr(list(map(str, target))), repr(list(map(str, source)))))
return act(target, source, env, exitstatfunc, presub,
show, execute, chdir, executor)
def get_presig(self, target, source, env, executor=None):
"""Return the signature contents of this action's command line.
This strips $( and $) and everything in between,
since those parts don't affect signatures.
"""
return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
def get_implicit_deps(self, target, source, env, executor=None):
return self._generate(target, source, env, 1, executor).get_implicit_deps(target, source, env)
def get_varlist(self, target, source, env, executor=None):
return self._generate(target, source, env, 1, executor).get_varlist(target, source, env, executor)
def get_targets(self, env, executor):
return self._generate(None, None, env, 1, executor).get_targets(env, executor)
# A LazyAction is a kind of hybrid generator and command action for
# strings of the form "$VAR". These strings normally expand to other
# strings (think "$CCCOM" to "$CC -c -o $TARGET $SOURCE"), but we also
# want to be able to replace them with functions in the construction
# environment. Consequently, we want lazy evaluation and creation of
# an Action in the case of the function, but that's overkill in the more
# normal case of expansion to other strings.
#
# So we do this with a subclass that's both a generator *and*
# a command action. The overridden methods all do a quick check
# of the construction variable, and if it's a string we just call
# the corresponding CommandAction method to do the heavy lifting.
# If not, then we call the same-named CommandGeneratorAction method.
# The CommandGeneratorAction methods work by using the overridden
# _generate() method, that is, our own way of handling "generation" of
# an action based on what's in the construction variable.
class LazyAction(CommandGeneratorAction, CommandAction):
def __init__(self, var, kw):
if __debug__: logInstanceCreation(self, 'Action.LazyAction')
#FUTURE CommandAction.__init__(self, '${'+var+'}', **kw)
CommandAction.__init__(self, '${'+var+'}', **kw)
self.var = SCons.Util.to_String(var)
self.gen_kw = kw
def get_parent_class(self, env):
c = env.get(self.var)
if is_String(c) and '\n' not in c:
return CommandAction
return CommandGeneratorAction
def _generate_cache(self, env):
if env:
c = env.get(self.var, '')
else:
c = ''
gen_cmd = Action(c, **self.gen_kw)
if not gen_cmd:
raise SCons.Errors.UserError("$%s value %s cannot be used to create an Action." % (self.var, repr(c)))
return gen_cmd
def _generate(self, target, source, env, for_signature, executor=None):
return self._generate_cache(env)
def __call__(self, target, source, env, *args, **kw):
c = self.get_parent_class(env)
return c.__call__(self, target, source, env, *args, **kw)
def get_presig(self, target, source, env):
c = self.get_parent_class(env)
return c.get_presig(self, target, source, env)
def get_varlist(self, target, source, env, executor=None):
c = self.get_parent_class(env)
return c.get_varlist(self, target, source, env, executor)
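# A hedged illustration of the hybrid behavior described above:
#
#   env['CCCOM'] = '$CC -c -o $TARGET $SOURCE'  # plain string value
#   a = Action('$CCCOM')                        # -> LazyAction
#   # Calling a(...) now takes the CommandAction path; if a user later
#   # sets env['CCCOM'] to a function, the very same LazyAction takes
#   # the CommandGeneratorAction path instead.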
class FunctionAction(_ActionAction):
"""Class for Python function actions."""
def __init__(self, execfunction, kw):
if __debug__: logInstanceCreation(self, 'Action.FunctionAction')
self.execfunction = execfunction
try:
self.funccontents = _callable_contents(execfunction)
except AttributeError:
try:
# See if execfunction will do the heavy lifting for us.
self.gc = execfunction.get_contents
except AttributeError:
# This is weird, just do the best we can.
self.funccontents = _object_contents(execfunction)
_ActionAction.__init__(self, **kw)
def function_name(self):
try:
return self.execfunction.__name__
except AttributeError:
try:
return self.execfunction.__class__.__name__
except AttributeError:
return "unknown_python_function"
def strfunction(self, target, source, env, executor=None):
if self.cmdstr is None:
return None
if self.cmdstr is not _null:
from SCons.Subst import SUBST_RAW
if executor:
c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
else:
c = env.subst(self.cmdstr, SUBST_RAW, target, source)
if c:
return c
def array(a):
def quote(s):
try:
str_for_display = s.str_for_display
except AttributeError:
s = repr(s)
else:
s = str_for_display()
return s
return '[' + ", ".join(map(quote, a)) + ']'
try:
strfunc = self.execfunction.strfunction
except AttributeError:
pass
else:
if strfunc is None:
return None
if callable(strfunc):
return strfunc(target, source, env)
name = self.function_name()
tstr = array(target)
sstr = array(source)
return "%s(%s, %s)" % (name, tstr, sstr)
def __str__(self):
name = self.function_name()
if name == 'ActionCaller':
return str(self.execfunction)
return "%s(target, source, env)" % name
def execute(self, target, source, env, executor=None):
exc_info = (None,None,None)
try:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
rsources = list(map(rfile, source))
try:
result = self.execfunction(target=target, source=rsources, env=env)
except KeyboardInterrupt, e:
raise
except SystemExit, e:
raise
except Exception, e:
result = e
exc_info = sys.exc_info()
if result:
result = SCons.Errors.convert_to_BuildError(result, exc_info)
result.node=target
result.action=self
try:
result.command=self.strfunction(target, source, env, executor)
except TypeError:
result.command=self.strfunction(target, source, env)
# FIXME: This maintains backward compatibility with respect to
# which type of exceptions were returned by raising an
# exception and which ones were returned by value. It would
# probably be best to always return them by value here, but
# some codes do not check the return value of Actions and I do
# not have the time to modify them at this point.
if (exc_info[1] and
not isinstance(exc_info[1],EnvironmentError)):
raise result
return result
finally:
# Break the cycle between the traceback object and this
# function stack frame. See the sys.exc_info() doc info for
# more information about this issue.
del exc_info
def get_presig(self, target, source, env):
"""Return the signature contents of this callable action."""
try:
return self.gc(target, source, env)
except AttributeError:
return self.funccontents
def get_implicit_deps(self, target, source, env):
return []
class ListAction(ActionBase):
"""Class for lists of other actions."""
def __init__(self, actionlist):
if __debug__: logInstanceCreation(self, 'Action.ListAction')
def list_of_actions(x):
if isinstance(x, ActionBase):
return x
return Action(x)
self.list = list(map(list_of_actions, actionlist))
# our children will have had any varlist
# applied; we don't need to do it again
self.varlist = ()
self.targets = '$TARGETS'
def genstring(self, target, source, env):
return '\n'.join([a.genstring(target, source, env) for a in self.list])
def __str__(self):
return '\n'.join(map(str, self.list))
def presub_lines(self, env):
return SCons.Util.flatten_sequence(
[a.presub_lines(env) for a in self.list])
def get_presig(self, target, source, env):
"""Return the signature contents of this action list.
Simple concatenation of the signatures of the elements.
"""
return "".join([x.get_contents(target, source, env) for x in self.list])
def __call__(self, target, source, env, exitstatfunc=_null, presub=_null,
show=_null, execute=_null, chdir=_null, executor=None):
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
for act in self.list:
stat = act(target, source, env, exitstatfunc, presub,
show, execute, chdir, executor)
if stat:
return stat
return 0
def get_implicit_deps(self, target, source, env):
result = []
for act in self.list:
result.extend(act.get_implicit_deps(target, source, env))
return result
def get_varlist(self, target, source, env, executor=None):
result = SCons.Util.OrderedDict()
for act in self.list:
for var in act.get_varlist(target, source, env, executor):
result[var] = True
return list(result.keys())
class ActionCaller(object):
"""A class for delaying calling an Action function with specific
(positional and keyword) arguments until the Action is actually
executed.
This class looks to the rest of the world like a normal Action object,
but what it's really doing is hanging on to the arguments until we
have a target, source and env to use for the expansion.
"""
def __init__(self, parent, args, kw):
self.parent = parent
self.args = args
self.kw = kw
def get_contents(self, target, source, env):
actfunc = self.parent.actfunc
try:
# "self.actfunc" is a function.
contents = str(actfunc.func_code.co_code)
except AttributeError:
# "self.actfunc" is a callable object.
try:
contents = str(actfunc.__call__.im_func.func_code.co_code)
except AttributeError:
# No __call__() method, so it might be a builtin
# or something like that. Do the best we can.
contents = str(actfunc)
contents = remove_set_lineno_codes(contents)
return contents
def subst(self, s, target, source, env):
# If s is a list, recursively apply subst()
# to every element in the list
if is_List(s):
result = []
for elem in s:
result.append(self.subst(elem, target, source, env))
return self.parent.convert(result)
# Special-case hack: Let a custom function wrapped in an
# ActionCaller get at the environment through which the action
# was called by using this hard-coded value as a special return.
if s == '$__env__':
return env
elif is_String(s):
return env.subst(s, 1, target, source)
return self.parent.convert(s)
def subst_args(self, target, source, env):
return [self.subst(x, target, source, env) for x in self.args]
def subst_kw(self, target, source, env):
kw = {}
for key in self.kw.keys():
kw[key] = self.subst(self.kw[key], target, source, env)
return kw
def __call__(self, target, source, env, executor=None):
args = self.subst_args(target, source, env)
kw = self.subst_kw(target, source, env)
return self.parent.actfunc(*args, **kw)
def strfunction(self, target, source, env):
args = self.subst_args(target, source, env)
kw = self.subst_kw(target, source, env)
return self.parent.strfunc(*args, **kw)
def __str__(self):
return self.parent.strfunc(*self.args, **self.kw)
class ActionFactory(object):
"""A factory class that will wrap up an arbitrary function
as an SCons-executable Action object.
The real heavy lifting here is done by the ActionCaller class.
We just collect the (positional and keyword) arguments that we're
called with and give them to the ActionCaller object we create,
so it can hang onto them until it needs them.
"""
def __init__(self, actfunc, strfunc, convert=lambda x: x):
self.actfunc = actfunc
self.strfunc = strfunc
self.convert = convert
def __call__(self, *args, **kw):
ac = ActionCaller(self, args, kw)
action = Action(ac, strfunction=ac.strfunction)
return action
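# A hedged sketch modeled on the Chmod/Copy factories in SCons.Defaults
# (treat the exact signatures as illustrative):
#
#   Chmod = ActionFactory(os.chmod,
#                         lambda dest, mode: 'Chmod("%s", 0%o)' % (dest, mode))
#   env.Command('out', 'in', Chmod('$TARGET', 0755))
#   # Chmod(...) returns an ActionCaller; os.chmod only runs when the
#   # target is actually built, after '$TARGET' has been substituted.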
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
mrjaydee82/SinLessKerne1-m8-GPE | toolchains/474/share/gdb/python/gdb/command/pretty_printers.py | 137 | 14474 | # Pretty-printer commands.
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with pretty-printers."""
import copy
import gdb
import re
def parse_printer_regexps(arg):
"""Internal utility to parse a pretty-printer command argv.
Arguments:
arg: The arguments to the command. The format is:
[object-regexp [name-regexp]].
Individual printers in a collection are named as
printer-name;subprinter-name.
Returns:
The result is a 3-tuple of compiled regular expressions, except that
the resulting compiled subprinter regexp is None if not provided.
Raises:
SyntaxError: an error processing ARG
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
object_regexp = "" # match everything
name_regexp = "" # match everything
subname_regexp = None
if argc > 3:
raise SyntaxError("too many arguments")
if argc >= 1:
object_regexp = argv[0]
if argc >= 2:
name_subname = argv[1].split(";", 1)
name_regexp = name_subname[0]
if len(name_subname) == 2:
subname_regexp = name_subname[1]
# That re.compile raises SyntaxError was determined empirically.
# We catch it and reraise it to provide a slightly more useful
# error message for the user.
try:
object_re = re.compile(object_regexp)
except SyntaxError:
raise SyntaxError("invalid object regexp: %s" % object_regexp)
try:
name_re = re.compile(name_regexp)
except SyntaxError:
raise SyntaxError("invalid name regexp: %s" % name_regexp)
if subname_regexp is not None:
try:
subname_re = re.compile(subname_regexp)
except SyntaxError:
raise SyntaxError("invalid subname regexp: %s" % subname_regexp)
else:
subname_re = None
return (object_re, name_re, subname_re)
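# Illustrative examples of the accepted argument forms (the regexps
# are invented):
#
#   parse_printer_regexps("")                 -> match all objects/printers
#   parse_printer_regexps("progspace std")    -> printers matching "std" in
#                                                objects matching "progspace"
#   parse_printer_regexps("global mylib;vec") -> subprinters matching "vec"
#                                                under printers matching "mylib"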
def printer_enabled_p(printer):
"""Internal utility to see if printer (or subprinter) is enabled."""
if hasattr(printer, "enabled"):
return printer.enabled
else:
return True
class InfoPrettyPrinter(gdb.Command):
"""GDB command to list all registered pretty-printers.
Usage: info pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to list.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__ (self):
super(InfoPrettyPrinter, self).__init__("info pretty-printer",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(printer):
"""Return "" if PRINTER is enabled, otherwise " [disabled]"."""
if printer_enabled_p(printer):
return ""
else:
return " [disabled]"
@staticmethod
def printer_name(printer):
"""Return the printer's name."""
if hasattr(printer, "name"):
return printer.name
if hasattr(printer, "__name__"):
return printer.__name__
# This "shouldn't happen", but the public API allows for
# direct additions to the pretty-printer list, and we shouldn't
# crash because someone added a bogus printer.
# Plus we want to give the user a way to list unknown printers.
return "unknown"
def list_pretty_printers(self, pretty_printers, name_re, subname_re):
"""Print a list of pretty-printers."""
# A potential enhancement is to provide an option to list printers in
# "lookup order" (i.e. unsorted).
sorted_pretty_printers = sorted (copy.copy(pretty_printers),
key = self.printer_name)
for printer in sorted_pretty_printers:
name = self.printer_name(printer)
enabled = self.enabled_string(printer)
if name_re.match(name):
print (" %s%s" % (name, enabled))
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
sorted_subprinters = sorted (copy.copy(printer.subprinters),
key = self.printer_name)
for subprinter in sorted_subprinters:
if (not subname_re or
subname_re.match(subprinter.name)):
print (" %s%s" %
(subprinter.name,
self.enabled_string(subprinter)))
def invoke1(self, title, printer_list,
obj_name_to_match, object_re, name_re, subname_re):
"""Subroutine of invoke to simplify it."""
if printer_list and object_re.match(obj_name_to_match):
print (title)
self.list_pretty_printers(printer_list, name_re, subname_re)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
self.invoke1("global pretty-printers:", gdb.pretty_printers,
"global", object_re, name_re, subname_re)
cp = gdb.current_progspace()
self.invoke1("progspace %s pretty-printers:" % cp.filename,
cp.pretty_printers, "progspace",
object_re, name_re, subname_re)
for objfile in gdb.objfiles():
self.invoke1(" objfile %s pretty-printers:" % objfile.filename,
objfile.pretty_printers, objfile.filename,
object_re, name_re, subname_re)
def count_enabled_printers(pretty_printers):
"""Return a 2-tuple of number of enabled and total printers."""
enabled = 0
total = 0
for printer in pretty_printers:
if (hasattr(printer, "subprinters")
and printer.subprinters is not None):
if printer_enabled_p(printer):
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
enabled += 1
total += len(printer.subprinters)
else:
if printer_enabled_p(printer):
enabled += 1
total += 1
return (enabled, total)
def count_all_enabled_printers():
"""Return a 2-tuble of the enabled state and total number of all printers.
This includes subprinters.
"""
enabled_count = 0
total_count = 0
(t_enabled, t_total) = count_enabled_printers(gdb.pretty_printers)
enabled_count += t_enabled
total_count += t_total
(t_enabled, t_total) = count_enabled_printers(gdb.current_progspace().pretty_printers)
enabled_count += t_enabled
total_count += t_total
for objfile in gdb.objfiles():
(t_enabled, t_total) = count_enabled_printers(objfile.pretty_printers)
enabled_count += t_enabled
total_count += t_total
return (enabled_count, total_count)
def pluralize(text, n, suffix="s"):
"""Return TEXT pluralized if N != 1."""
if n != 1:
return "%s%s" % (text, suffix)
else:
return text
def show_pretty_printer_enabled_summary():
"""Print the number of printers enabled/disabled.
We count subprinters individually.
"""
(enabled_count, total_count) = count_all_enabled_printers()
print ("%d of %d printers enabled" % (enabled_count, total_count))
def do_enable_pretty_printer_1 (pretty_printers, name_re, subname_re, flag):
"""Worker for enabling/disabling pretty-printers.
Arguments:
pretty_printers: list of pretty-printers
name_re: regular-expression object to select printers
subname_re: regular expression object to select subprinters or None
if all are affected
flag: True for Enable, False for Disable
Returns:
The number of printers affected.
This is just for informational purposes for the user.
"""
total = 0
for printer in pretty_printers:
if (hasattr(printer, "name") and name_re.match(printer.name) or
hasattr(printer, "__name__") and name_re.match(printer.__name__)):
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
total += 1
# NOTE: We preserve individual subprinter settings.
printer.enabled = flag
else:
# NOTE: Whether this actually disables the subprinter
# depends on whether the printer's lookup function supports
# the "enable" API. We can only assume it does.
for subprinter in printer.subprinters:
if subname_re.match(subprinter.name):
# Only record printers that change state.
if (printer_enabled_p(printer) and
printer_enabled_p(subprinter) != flag):
total += 1
subprinter.enabled = flag
else:
# This printer has no subprinters.
# If the user does "disable pretty-printer .* .* foo"
# should we disable printers that don't have subprinters?
# How do we apply "foo" in this context? Since there is no
# "foo" subprinter it feels like we should skip this printer.
# There's still the issue of how to handle
# "disable pretty-printer .* .* .*", and every other variation
# that can match everything. For now punt and only support
# "disable pretty-printer .* .*" (i.e. subname is elided)
# to disable everything.
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
total += 1
printer.enabled = flag
return total
def do_enable_pretty_printer (arg, flag):
"""Internal worker for enabling/disabling pretty-printers."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
total = 0
if object_re.match("global"):
total += do_enable_pretty_printer_1(gdb.pretty_printers,
name_re, subname_re, flag)
cp = gdb.current_progspace()
if object_re.match("progspace"):
total += do_enable_pretty_printer_1(cp.pretty_printers,
name_re, subname_re, flag)
for objfile in gdb.objfiles():
if object_re.match(objfile.filename):
total += do_enable_pretty_printer_1(objfile.pretty_printers,
name_re, subname_re, flag)
if flag:
state = "enabled"
else:
state = "disabled"
print ("%d %s %s" % (total, pluralize("printer", total), state))
# Print the total list of printers currently enabled/disabled.
# This is to further assist the user in determining whether the result
# is expected. Since selection is regexp-based, this extra check is useful.
show_pretty_printer_enabled_summary()
# Enable/Disable one or more pretty-printers.
#
# This is intended for use when a broken pretty-printer is shipped/installed
# and the user wants to disable that printer without disabling all the other
# printers.
#
# A useful addition would be -v (verbose) to show each printer affected.
class EnablePrettyPrinter (gdb.Command):
"""GDB command to enable the specified pretty-printer.
Usage: enable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(EnablePrettyPrinter, self).__init__("enable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, True)
class DisablePrettyPrinter (gdb.Command):
"""GDB command to disable the specified pretty-printer.
Usage: disable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(DisablePrettyPrinter, self).__init__("disable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, False)
def register_pretty_printer_commands():
"""Call from a top level script to install the pretty-printer commands."""
InfoPrettyPrinter()
EnablePrettyPrinter()
DisablePrettyPrinter()
register_pretty_printer_commands()
| gpl-2.0 |
tsh-xx/folio100 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no parent with that name
exists, then create the function as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
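# A hedged example of a trace_pipe line the regexp above accepts (the
# task name, CPU and timestamp are invented):
#
#   myapp-1234  [002]  1584.311: do_fork <-sys_clone
#
# parseLine would return ('1584.311', 'do_fork', 'sys_clone').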
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
duramato/SickRage | tornado/iostream.py | 63 | 60393 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)
if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return
# EPROTOTYPE if called while a socket is being torn down:
# http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
# Since the socket is being closed anyway, treat this as an ECONNRESET
# instead of an unexpected error.
_ERRNO_CONNRESET += (errno.EPROTOTYPE,)
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
#######################################################
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information), or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
# Ensure that the future doesn't log an error because its
# failure was never examined.
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
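    # Sketch of the two calling styles this method supports (illustrative;
    # stream and handler names are assumptions):
    #   stream.read_until(b"\r\n", callback=on_line)     # callback style
    #   data = yield stream.read_until(b"\r\n")          # Future/coroutine style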
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``).
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
try:
self._try_inline_read()
except:
future.add_done_callback(lambda f: f.exception())
raise
return future
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
future.add_done_callback(lambda f: f.exception())
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
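    # The same dual interface applies to writes (illustrative sketch):
    #   stream.write(b"payload", callback=on_written)    # callback style
    #   yield stream.write(b"payload")                   # Future style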
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_listener)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
for future in futures:
if (isinstance(self.error, (socket.error, IOError)) and
errno_from_exception(self.error) in _ERRNO_CONNRESET):
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert self._read_future is None
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
# If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if e.args[0] in _ERRNO_CONNRESET:
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if e.args[0] not in _ERRNO_CONNRESET:
# Broken pipe errors are usually caused by connection
# reset, and it's better to not log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
If a sequence of fast-path ops do not end in a slow-path op,
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`).
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print(data)
stream.close()
tornado.ioloop.IOLoop.instance().stop()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
If specified, the ``server_hostname`` parameter will be used
in SSL connections for certificate validation (if requested in
the ``ssl_options``) and SNI (if supported; requires
Python 3.2+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
if future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either a dictionary
of options or an `ssl.SSLContext`. If a ``server_hostname``
is given, it will be used for certificate verification
(as configured in the ``ssl_options``).
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
ssl_options = {}
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options,
server_hostname=server_hostname,
server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
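    # Sketch of intended STARTTLS use from a coroutine (the SSL context and
    # hostname are assumptions for illustration):
    #   stream = yield stream.start_tls(server_side=False, ssl_options=ctx,
    #                                   server_hostname="mail.example.com")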
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno not in (errno.EINVAL, errno.ECONNRESET):
raise
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be a dictionary
of keyword arguments for `ssl.wrap_socket`, or an `ssl.SSLContext`
object.
"""
self._ssl_options = kwargs.pop('ssl_options', {})
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF, so make that error
# quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if (err.args[0] in _ERRNO_CONNRESET or
err.args[0] == errno.EBADF):
return self.close(exc_info=True)
raise
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
# Save the user's callback and run it after the ssl handshake
# has completed.
self._ssl_connect_callback = stack_context.wrap(callback)
self._server_hostname = server_hostname
# Note: Since we don't pass our callback argument along to
# super.connect(), this will always return a Future.
# This is harmless, but a bit less efficient than it could be.
return super(SSLIOStream, self).connect(address, callback=None)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
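# Worked example (sketch): for deque(['abc', 'de', 'fghi']), new_len is
# max(3 * 2, 3 + 2) == 6, so the deque becomes deque(['abcdef', 'ghi']).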
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
| gpl-3.0 |
bcferrycoder/holideck | requests/packages/chardet2/codingstatemachine.py | 25 | 2297 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][c]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
self._mCurrentState = self._mModel['stateTable'][self._mCurrentState * self._mModel['classFactor'] + byteCls]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
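# Minimal (assumed) driver loop for a detection prober; SM_MODEL stands in for
# one of the model dicts defined elsewhere in chardet:
#   machine = CodingStateMachine(SM_MODEL)
#   for b in byte_buffer:
#       state = machine.next_state(b)
#       if state == eError:
#           break        # input is invalid in this encoding
#       elif state == eItsMe:
#           return True  # encoding positively identified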
| mit |
GDGND/evm | allauth/socialaccount/providers/openid/views.py | 53 | 3844 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from openid.consumer.discover import DiscoveryFailure
from openid.consumer import consumer
from openid.extensions.sreg import SRegRequest
from openid.extensions.ax import FetchRequest, AttrInfo
from allauth.socialaccount.app_settings import QUERY_EMAIL
from allauth.socialaccount.models import SocialLogin
from allauth.socialaccount.helpers import render_authentication_error
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount import providers
from .utils import (DBOpenIDStore, SRegFields, AXAttributes,
JSONSafeSession)
from .forms import LoginForm
from .provider import OpenIDProvider
from ..base import AuthError
def _openid_consumer(request):
store = DBOpenIDStore()
client = consumer.Consumer(JSONSafeSession(request.session), store)
return client
def login(request):
if 'openid' in request.GET or request.method == 'POST':
form = LoginForm(
dict(list(request.GET.items()) + list(request.POST.items()))
)
if form.is_valid():
client = _openid_consumer(request)
try:
auth_request = client.begin(form.cleaned_data['openid'])
if QUERY_EMAIL:
sreg = SRegRequest()
for name in SRegFields:
sreg.requestField(field_name=name,
required=True)
auth_request.addExtension(sreg)
ax = FetchRequest()
for name in AXAttributes:
ax.add(AttrInfo(name,
required=True))
auth_request.addExtension(ax)
callback_url = reverse(callback)
SocialLogin.stash_state(request)
redirect_url = auth_request.redirectURL(
request.build_absolute_uri('/'),
request.build_absolute_uri(callback_url))
return HttpResponseRedirect(redirect_url)
# UnicodeDecodeError:
# see https://github.com/necaris/python3-openid/issues/1
except (UnicodeDecodeError, DiscoveryFailure) as e:
if request.method == 'POST':
form._errors["openid"] = form.error_class([e])
else:
return render_authentication_error(
request,
OpenIDProvider.id,
exception=e)
else:
form = LoginForm(initial={'next': request.GET.get('next'),
'process': request.GET.get('process')})
d = dict(form=form)
return render_to_response('openid/login.html',
d, context_instance=RequestContext(request))
@csrf_exempt
def callback(request):
client = _openid_consumer(request)
response = client.complete(
dict(list(request.GET.items()) + list(request.POST.items())),
request.build_absolute_uri(request.path))
if response.status == consumer.SUCCESS:
login = providers.registry \
.by_id(OpenIDProvider.id) \
.sociallogin_from_response(request, response)
login.state = SocialLogin.unstash_state(request)
ret = complete_social_login(request, login)
else:
if response.status == consumer.CANCEL:
error = AuthError.CANCELLED
else:
error = AuthError.UNKNOWN
ret = render_authentication_error(
request,
OpenIDProvider.id,
error=error)
return ret
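# These views are normally reached through the provider's urlpatterns (an
# assumption about the surrounding app, which is not shown here), roughly:
#   url(r'^openid/login/$', views.login, name='openid_login')
#   url(r'^openid/callback/$', views.callback, name='openid_callback')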
| mit |
thnee/ansible | lib/ansible/module_utils/network/eos/argspec/lacp/lacp.py | 21 | 1317 | # -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the eos_lacp module
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class LacpArgs(object):
"""The arg spec for the eos_lacp module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
'config': {
'options': {
'system': {
'options': {
'priority': {'type': 'int'}
},
'type': 'dict'
}
},
'type': 'dict'
},
'state': {'choices': ['merged', 'replaced', 'deleted'], 'default': 'merged', 'type': 'str'}}
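    # A playbook task this spec would validate might look like (values are
    # illustrative assumptions):
    #   - eos_lacp:
    #       config:
    #         system:
    #           priority: 30
    #       state: merged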
| gpl-3.0 |
rhelmer/socorro-webapp | crashstats/tokens/views.py | 4 | 2819 | from django import http
from django.contrib.auth.models import Permission
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import RequestSite
from django.shortcuts import render, get_object_or_404, redirect
from django.db import transaction
from . import models
from . import forms
@login_required
@transaction.commit_on_success
def home(request):
context = {}
all_possible_permissions = (
Permission.objects.filter(content_type__model='')
.order_by('name')
)
possible_permissions = []
for permission in all_possible_permissions:
if request.user.has_perm('crashstats.' + permission.codename):
possible_permissions.append(permission)
if request.method == 'POST':
form = forms.GenerateTokenForm(
request.POST,
possible_permissions=possible_permissions
)
if form.is_valid():
for permission in form.cleaned_data['permissions']:
perm_name = 'crashstats.%s' % permission.codename
if not request.user.has_perm(perm_name):
return http.HttpResponseForbidden(
'You do not have this permission'
)
token = models.Token.objects.create(
user=request.user,
notes=form.cleaned_data['notes']
)
for permission in form.cleaned_data['permissions']:
token.permissions.add(permission)
return redirect('tokens:home')
else:
if possible_permissions:
form = forms.GenerateTokenForm(
possible_permissions=possible_permissions
)
else:
# This is surprisingly important!
# If you *have* permissions, you can actually create a
# token without selecting *any* permissions. The point of
# that is to avoid the rate limiter.
# If you don't have any permissions attached to your user
# account, it means you haven't been hand-curated by any
# administrator, and in that case you shouldn't be able
# to avoid the rate limiter.
form = None
context['form'] = form
context['your_tokens'] = (
models.Token.objects
.filter(user=request.user)
.order_by('-created')
)
context['absolute_base_url'] = (
'%s://%s' % (
request.is_secure() and 'https' or 'http',
RequestSite(request).domain
)
)
return render(request, 'tokens/home.html', context)
@login_required
@transaction.commit_on_success
def delete_token(request, pk):
token = get_object_or_404(models.Token, pk=pk, user=request.user)
token.delete()
return redirect('tokens:home')
| mpl-2.0 |
Shinichi-Nakagawa/tsubuyaki_league_draft_list_script | tables.py | 1 | 17770 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import Column, DateTime, Float, Integer, String, Table, text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class AllstarFull(Base):
__tablename__ = u'AllstarFull'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
gameNum = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
gameID = Column(String(12))
teamID = Column(String(3))
lgID = Column(String(2))
GP = Column(Integer)
startingPos = Column(Integer)
class Appearance(Base):
__tablename__ = u'Appearances'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3), primary_key=True, nullable=False, server_default=text("''"))
lgID = Column(String(2))
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
G_all = Column(Integer)
GS = Column(Integer)
G_batting = Column(Integer)
G_defense = Column(Integer)
G_p = Column(Integer)
G_c = Column(Integer)
G_1b = Column(Integer)
G_2b = Column(Integer)
G_3b = Column(Integer)
G_ss = Column(Integer)
G_lf = Column(Integer)
G_cf = Column(Integer)
G_rf = Column(Integer)
G_of = Column(Integer)
G_dh = Column(Integer)
G_ph = Column(Integer)
G_pr = Column(Integer)
class AwardsManager(Base):
__tablename__ = u'AwardsManagers'
playerID = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
awardID = Column(String(75), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
tie = Column(String(1))
notes = Column(String(100))
class AwardsPlayer(Base):
__tablename__ = u'AwardsPlayers'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
awardID = Column(String(255), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
tie = Column(String(1))
notes = Column(String(100))
class AwardsShareManager(Base):
__tablename__ = u'AwardsShareManagers'
awardID = Column(String(25), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
playerID = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
pointsWon = Column(Integer)
pointsMax = Column(Integer)
votesFirst = Column(Integer)
class AwardsSharePlayer(Base):
__tablename__ = u'AwardsSharePlayers'
awardID = Column(String(25), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
pointsWon = Column(Float(asdecimal=True))
pointsMax = Column(Integer)
votesFirst = Column(Float(asdecimal=True))
class Batting(Base):
__tablename__ = u'Batting'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
stint = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3))
lgID = Column(String(2))
G = Column(Integer)
G_batting = Column(Integer)
AB = Column(Integer)
R = Column(Integer)
H = Column(Integer)
_2B = Column(u'2B', Integer)
_3B = Column(u'3B', Integer)
HR = Column(Integer)
RBI = Column(Integer)
SB = Column(Integer)
CS = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
IBB = Column(Integer)
HBP = Column(Integer)
SH = Column(Integer)
SF = Column(Integer)
GIDP = Column(Integer)
G_old = Column(Integer)
class BattingPost(Base):
__tablename__ = u'BattingPost'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
round = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
teamID = Column(String(3))
lgID = Column(String(2))
G = Column(Integer)
AB = Column(Integer)
R = Column(Integer)
H = Column(Integer)
_2B = Column(u'2B', Integer)
_3B = Column(u'3B', Integer)
HR = Column(Integer)
RBI = Column(Integer)
SB = Column(Integer)
CS = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
IBB = Column(Integer)
HBP = Column(Integer)
SH = Column(Integer)
SF = Column(Integer)
GIDP = Column(Integer)
class BattingTotal(Base):
__tablename__ = u'BattingTotal'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
G = Column(Integer)
G_batting = Column(Integer)
AB = Column(Integer)
R = Column(Integer)
H = Column(Integer)
_2B = Column(u'2B', Integer)
_3B = Column(u'3B', Integer)
HR = Column(Integer)
RBI = Column(Integer)
SB = Column(Integer)
CS = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
IBB = Column(Integer)
HBP = Column(Integer)
SH = Column(Integer)
SF = Column(Integer)
GIDP = Column(Integer)
G_old = Column(Integer)
class CollegePlaying(Base):
__tablename__ = u'CollegePlaying'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
schoolID = Column(String(15), primary_key=True)
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
class Fielding(Base):
__tablename__ = u'Fielding'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
stint = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3))
lgID = Column(String(2))
POS = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
G = Column(Integer)
GS = Column(Integer)
InnOuts = Column(Integer)
PO = Column(Integer)
A = Column(Integer)
E = Column(Integer)
DP = Column(Integer)
PB = Column(Integer)
WP = Column(Integer)
SB = Column(Integer)
CS = Column(Integer)
ZR = Column(Float(asdecimal=True))
class FieldingOF(Base):
__tablename__ = u'FieldingOF'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
stint = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
Glf = Column(Integer)
Gcf = Column(Integer)
Grf = Column(Integer)
class FieldingPost(Base):
__tablename__ = u'FieldingPost'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3))
lgID = Column(String(2))
round = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
POS = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
G = Column(Integer)
GS = Column(Integer)
InnOuts = Column(Integer)
PO = Column(Integer)
A = Column(Integer)
E = Column(Integer)
DP = Column(Integer)
TP = Column(Integer)
PB = Column(Integer)
SB = Column(Integer)
CS = Column(Integer)
class HallOfFame(Base):
__tablename__ = u'HallOfFame'
playerID = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
yearid = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
votedBy = Column(String(64), primary_key=True, nullable=False, server_default=text("''"))
ballots = Column(Integer)
needed = Column(Integer)
votes = Column(Integer)
inducted = Column(String(1))
category = Column(String(20))
needed_note = Column(String(25))
class Manager(Base):
__tablename__ = u'Managers'
playerID = Column(String(10))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3), primary_key=True, nullable=False, server_default=text("''"))
lgID = Column(String(2))
inseason = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
G = Column(Integer)
W = Column(Integer)
L = Column(Integer)
rank = Column(Integer)
plyrMgr = Column(String(1))
class ManagersHalf(Base):
__tablename__ = u'ManagersHalf'
playerID = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3), primary_key=True, nullable=False, server_default=text("''"))
lgID = Column(String(2))
inseason = Column(Integer)
half = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
G = Column(Integer)
W = Column(Integer)
L = Column(Integer)
rank = Column(Integer)
class Master(Base):
__tablename__ = u'Master'
playerID = Column(String(10), primary_key=True)
birthYear = Column(Integer)
birthMonth = Column(Integer)
birthDay = Column(Integer)
birthCountry = Column(String(50))
birthState = Column(String(2))
birthCity = Column(String(50))
deathYear = Column(Integer)
deathMonth = Column(Integer)
deathDay = Column(Integer)
deathCountry = Column(String(50))
deathState = Column(String(2))
deathCity = Column(String(50))
nameFirst = Column(String(50))
nameLast = Column(String(50))
nameGiven = Column(String(255))
weight = Column(Integer)
height = Column(Float(asdecimal=True))
bats = Column(String(1))
throws = Column(String(1))
debut = Column(DateTime)
finalGame = Column(DateTime)
retroID = Column(String(9))
bbrefID = Column(String(9))
class Pitching(Base):
__tablename__ = u'Pitching'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
stint = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3))
lgID = Column(String(2))
W = Column(Integer)
L = Column(Integer)
G = Column(Integer)
GS = Column(Integer)
CG = Column(Integer)
SHO = Column(Integer)
SV = Column(Integer)
IPouts = Column(Integer)
H = Column(Integer)
ER = Column(Integer)
HR = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
BAOpp = Column(Float(asdecimal=True))
ERA = Column(Float(asdecimal=True))
IBB = Column(Integer)
WP = Column(Integer)
HBP = Column(Integer)
BK = Column(Integer)
BFP = Column(Integer)
GF = Column(Integer)
R = Column(Integer)
SH = Column(Integer)
SF = Column(Integer)
GIDP = Column(Integer)
class PitchingPost(Base):
__tablename__ = u'PitchingPost'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
round = Column(String(10), primary_key=True, nullable=False, server_default=text("''"))
teamID = Column(String(3))
lgID = Column(String(2))
W = Column(Integer)
L = Column(Integer)
G = Column(Integer)
GS = Column(Integer)
CG = Column(Integer)
SHO = Column(Integer)
SV = Column(Integer)
IPouts = Column(Integer)
H = Column(Integer)
ER = Column(Integer)
HR = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
BAOpp = Column(Float(asdecimal=True))
ERA = Column(Float(asdecimal=True))
IBB = Column(Integer)
WP = Column(Integer)
HBP = Column(Integer)
BK = Column(Integer)
BFP = Column(Integer)
GF = Column(Integer)
R = Column(Integer)
SH = Column(Integer)
SF = Column(Integer)
GIDP = Column(Integer)
class PitchingTotal(Base):
__tablename__ = u'PitchingTotal'
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
W = Column(Integer)
L = Column(Integer)
G = Column(Integer)
GS = Column(Integer)
CG = Column(Integer)
SHO = Column(Integer)
SV = Column(Integer)
IPouts = Column(Integer)
H = Column(Integer)
ER = Column(Integer)
HR = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
IBB = Column(Integer)
WP = Column(Integer)
HBP = Column(Integer)
BK = Column(Integer)
BFP = Column(Integer)
GF = Column(Integer)
R = Column(Integer)
SH = Column(Integer)
SF = Column(Integer)
GIDP = Column(Integer)
class Salary(Base):
__tablename__ = u'Salaries'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
teamID = Column(String(3), primary_key=True, nullable=False, server_default=text("''"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
salary = Column(Float(asdecimal=True))
class SalariesTotal(Base):
__tablename__ = u'SalariesTotal'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
playerID = Column(String(9), primary_key=True, nullable=False, server_default=text("''"))
salary = Column(Float(asdecimal=True))
class School(Base):
__tablename__ = u'Schools'
schoolID = Column(String(15), primary_key=True)
name_full = Column(String(255))
city = Column(String(55))
state = Column(String(55))
country = Column(String(55))
class SeriesPost(Base):
__tablename__ = u'SeriesPost'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
round = Column(String(5), primary_key=True, nullable=False, server_default=text("''"))
teamIDwinner = Column(String(3))
lgIDwinner = Column(String(2))
teamIDloser = Column(String(3))
lgIDloser = Column(String(2))
wins = Column(Integer)
losses = Column(Integer)
ties = Column(Integer)
class Team(Base):
__tablename__ = u'Teams'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
teamID = Column(String(3), primary_key=True, nullable=False, server_default=text("''"))
franchID = Column(String(3))
divID = Column(String(1))
Rank = Column(Integer)
G = Column(Integer)
Ghome = Column(Integer)
W = Column(Integer)
L = Column(Integer)
DivWin = Column(String(1))
WCWin = Column(String(1))
LgWin = Column(String(1))
WSWin = Column(String(1))
R = Column(Integer)
AB = Column(Integer)
H = Column(Integer)
_2B = Column(u'2B', Integer)
_3B = Column(u'3B', Integer)
HR = Column(Integer)
BB = Column(Integer)
SO = Column(Integer)
SB = Column(Integer)
CS = Column(Integer)
HBP = Column(Integer)
SF = Column(Integer)
RA = Column(Integer)
ER = Column(Integer)
ERA = Column(Float(asdecimal=True))
CG = Column(Integer)
SHO = Column(Integer)
SV = Column(Integer)
IPouts = Column(Integer)
HA = Column(Integer)
HRA = Column(Integer)
BBA = Column(Integer)
SOA = Column(Integer)
E = Column(Integer)
DP = Column(Integer)
FP = Column(Float(asdecimal=True))
name = Column(String(50))
park = Column(String(255))
attendance = Column(Integer)
BPF = Column(Integer)
PPF = Column(Integer)
teamIDBR = Column(String(3))
teamIDlahman45 = Column(String(3))
teamIDretro = Column(String(3))
class TeamsFranchise(Base):
__tablename__ = u'TeamsFranchises'
franchID = Column(String(3), primary_key=True)
franchName = Column(String(50))
active = Column(String(2))
NAassoc = Column(String(3))
class TeamsHalf(Base):
__tablename__ = u'TeamsHalf'
yearID = Column(Integer, primary_key=True, nullable=False, server_default=text("'0'"))
lgID = Column(String(2), primary_key=True, nullable=False, server_default=text("''"))
teamID = Column(String(3), primary_key=True, nullable=False, server_default=text("''"))
Half = Column(String(1), primary_key=True, nullable=False, server_default=text("''"))
divID = Column(String(1))
DivWin = Column(String(1))
Rank = Column(Integer)
G = Column(Integer)
W = Column(Integer)
L = Column(Integer)
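# Hedged usage sketch (an addition, not part of the dumped file): one way
# these declarative models might be queried. The SQLite URL and the
# playerID/yearID values below are illustrative assumptions only.
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite:///lahman.sqlite')  # assumed local copy
# session = sessionmaker(bind=engine)()
# # e.g. one player's batting stints for a season:
# rows = session.query(Batting).filter_by(playerID='suzukic01', yearID=2004).all()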
| mit |
hickford/youtube-dl | youtube_dl/extractor/roxwel.py | 180 | 1965 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate, determine_ext
class RoxwelIE(InfoExtractor):
_VALID_URL = r'https?://www\.roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)'
_TEST = {
'url': 'http://www.roxwel.com/player/passionpittakeawalklive.html',
'info_dict': {
'id': 'passionpittakeawalklive',
'ext': 'flv',
'title': 'Take A Walk (live)',
'uploader': 'Passion Pit',
'uploader_id': 'passionpit',
'upload_date': '20120928',
'description': 'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
filename = mobj.group('filename')
info_url = 'http://www.roxwel.com/api/videos/%s' % filename
info = self._download_json(info_url, filename)
rtmp_rates = sorted([int(r.replace('flv_', '')) for r in info['media_rates'] if r.startswith('flv_')])
best_rate = rtmp_rates[-1]
url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate)
rtmp_url = self._download_webpage(url_page_url, filename, 'Downloading video url')
ext = determine_ext(rtmp_url)
if ext == 'f4v':
rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename)
return {
'id': filename,
'title': info['title'],
'url': rtmp_url,
'ext': 'flv',
'description': info['description'],
'thumbnail': info.get('player_image_url') or info.get('image_url_large'),
'uploader': info['artist'],
'uploader_id': info['artistname'],
'upload_date': unified_strdate(info['dbdate']),
}
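# Added note (not upstream code): the best-rate selection above turns e.g.
# media_rates ['flv_480', 'flv_720', 'mp4_720'] into rtmp_rates
# [480, 720] and picks 720 as best_rate for the playlist URL request.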
| unlicense |
rdeva31/kernel-msm-3.10 | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
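# Worked example (added note, not part of the original script): given a
# hypothetical table kallsyms = [(0x1000, 'foo'), (0x2000, 'bar')], the
# bisection above maps get_sym(0x1004) to ('foo', 4) -- the nearest
# symbol at or below the address, plus the offset -- and any address
# below the first entry to (None, 0).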
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
ansible/ansible | lib/ansible/galaxy/dependency_resolution/providers.py | 20 | 14391 | # -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Requirement provider interfaces."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import functools
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Iterable, List, NamedTuple, Optional, Union
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate,
Requirement,
)
from ansible.galaxy.dependency_resolution.versioning import (
is_pre_release,
meets_requirements,
)
from ansible.utils.version import SemanticVersion
from resolvelib import AbstractProvider
class CollectionDependencyProvider(AbstractProvider):
"""Delegate providing a requirement interface for the resolver."""
def __init__(
self, # type: CollectionDependencyProvider
apis, # type: MultiGalaxyAPIProxy
concrete_artifacts_manager=None, # type: ConcreteArtifactsManager
user_requirements=None, # type: Iterable[Requirement]
preferred_candidates=None, # type: Iterable[Candidate]
with_deps=True, # type: bool
with_pre_releases=False, # type: bool
upgrade=False, # type: bool
): # type: (...) -> None
r"""Initialize helper attributes.
:param api: An instance of the multiple Galaxy APIs wrapper.
:param concrete_artifacts_manager: An instance of the caching \
concrete artifacts manager.
:param with_deps: A flag specifying whether the resolver \
should attempt to pull-in the deps of the \
requested requirements. On by default.
:param with_pre_releases: A flag specifying whether the \
resolver should skip pre-releases. \
Off by default.
"""
self._api_proxy = apis
self._make_req_from_dict = functools.partial(
Requirement.from_requirement_dict,
art_mgr=concrete_artifacts_manager,
)
self._pinned_candidate_requests = set(
Candidate(req.fqcn, req.ver, req.src, req.type)
for req in (user_requirements or ())
if req.is_concrete_artifact or (
req.ver != '*' and
not req.ver.startswith(('<', '>', '!='))
)
)
self._preferred_candidates = set(preferred_candidates or ())
self._with_deps = with_deps
self._with_pre_releases = with_pre_releases
self._upgrade = upgrade
def _is_user_requested(self, candidate): # type: (Candidate) -> bool
"""Check if the candidate is requested by the user."""
if candidate in self._pinned_candidate_requests:
return True
if candidate.is_online_index_pointer and candidate.src is not None:
# NOTE: Candidate is a namedtuple, it has a source server set
# NOTE: to a specific GalaxyAPI instance or `None`. When the
# NOTE: user runs
# NOTE:
# NOTE: $ ansible-galaxy collection install ns.coll
# NOTE:
# NOTE: then it's saved in `self._pinned_candidate_requests`
# NOTE: as `('ns.coll', '*', None, 'galaxy')` but then
# NOTE: `self.find_matches()` calls `self.is_satisfied_by()`
# NOTE: with Candidate instances bound to each specific
# NOTE: server available, those look like
# NOTE: `('ns.coll', '*', GalaxyAPI(...), 'galaxy')` and
# NOTE: wouldn't match the user requests saved in
# NOTE: `self._pinned_candidate_requests`. This is why we
# NOTE: normalize the collection to have `src=None` and try
# NOTE: again.
# NOTE:
# NOTE: When the user request comes from `requirements.yml`
# NOTE: with the `source:` set, it'll match the first check
# NOTE: but it still can have entries with `src=None` so this
# NOTE: normalized check is still necessary.
return Candidate(
candidate.fqcn, candidate.ver, None, candidate.type,
) in self._pinned_candidate_requests
return False
def identify(self, requirement_or_candidate):
# type: (Union[Candidate, Requirement]) -> str
"""Given requirement or candidate, return an identifier for it.
This is used to identify a requirement or candidate, e.g.
whether two requirements should have their specifier parts
(version ranges or pins) merged, whether two candidates would
conflict with each other (because they have same name but
different versions).
"""
return requirement_or_candidate.canonical_package_id
def get_preference(
self, # type: CollectionDependencyProvider
resolution, # type: Optional[Candidate]
candidates, # type: List[Candidate]
information, # type: List[NamedTuple]
): # type: (...) -> Union[float, int]
"""Return sort key function return value for given requirement.
This result should be based on preference that is defined as
"I think this requirement should be resolved first".
The lower the return value is, the more preferred this
group of arguments is.
:param resolution: Currently pinned candidate, or ``None``.
:param candidates: A list of possible candidates.
:param information: A list of requirement information.
Each ``information`` instance is a named tuple with two entries:
* ``requirement`` specifies a requirement contributing to
the current candidate list
* ``parent`` specifies the candidate that provides
(depends on) the requirement, or `None`
to indicate a root requirement.
The preference could depend on a variety of issues, including
(not necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should
probably be worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this
requirement? Those with few left should likely be worked on
first, I guess?
* Are there any known conflicts for this requirement?
We should probably work on those with the most
known conflicts.
A sortable value should be returned (this will be used as the
`key` parameter of the built-in sorting function). The smaller
the value is, the more preferred this requirement is (i.e. the
sorting function is called with ``reverse=False``).
"""
if any(
candidate in self._preferred_candidates
for candidate in candidates
):
# NOTE: Prefer pre-installed candidates over newer versions
# NOTE: available from Galaxy or other sources.
return float('-inf')
return len(candidates)
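# Illustration (added note, not upstream code): with the logic above, a
# requirement whose candidate list contains an already-installed
# collection sorts first (key float('-inf')); otherwise requirements
# with fewer viable candidates are resolved earlier -- e.g. a pin such
# as 'ns.coll:==1.0.0' (one candidate) is handled before a loose
# 'ns.other:>=1.0' (many candidates).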
def find_matches(self, requirements):
# type: (List[Requirement]) -> List[Candidate]
r"""Find all possible candidates satisfying given requirements.
This tries to get candidates based on the requirements' types.
For concrete requirements (SCM, dir, namespace dir, local or
remote archives), the one-and-only match is returned.
For a "named" requirement, Galaxy-compatible APIs are consulted
to find concrete candidates for this requirement. If there's a
pre-installed candidate, it's prepended in front of the others.
:param requirements: A collection of requirements which all of \
the returned candidates must match. \
All requirements are guaranteed to have \
the same identifier. \
The collection is never empty.
:returns: An iterable that orders candidates by preference, \
e.g. the most preferred candidate comes first.
"""
# FIXME: The first requirement may be a Git repo followed by
# FIXME: its cloned tmp dir. Using only the first one creates
# FIXME: loops that prevent any further dependency exploration.
# FIXME: We need to figure out how to prevent this.
first_req = requirements[0]
fqcn = first_req.fqcn
# The fqcn is guaranteed to be the same
coll_versions = self._api_proxy.get_collection_versions(first_req)
if first_req.is_concrete_artifact:
# FIXME: do we assume that all the following artifacts are also concrete?
# FIXME: does using fqcn==None cause us problems here?
return [
Candidate(fqcn, version, _none_src_server, first_req.type)
for version, _none_src_server in coll_versions
]
latest_matches = sorted(
{
candidate for candidate in (
Candidate(fqcn, version, src_server, 'galaxy')
for version, src_server in coll_versions
)
if all(self.is_satisfied_by(requirement, candidate) for requirement in requirements)
# FIXME
# if all(self.is_satisfied_by(requirement, candidate) and (
# requirement.src is None or # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str
# requirement.src == candidate.src
# ))
},
key=lambda candidate: (
SemanticVersion(candidate.ver), candidate.src,
),
reverse=True, # prefer newer versions over older ones
)
preinstalled_candidates = {
candidate for candidate in self._preferred_candidates
if candidate.fqcn == fqcn and
(
# check if an upgrade is necessary
all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
(
not self._upgrade or
# check if an upgrade is preferred
all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
)
)
}
return list(preinstalled_candidates) + latest_matches
def is_satisfied_by(self, requirement, candidate):
# type: (Requirement, Candidate) -> bool
r"""Whether the given requirement is satisfiable by a candidate.
:param requirement: A requirement that produced the `candidate`.
:param candidate: A pinned candidate supposedly matching the \
`requirement` specifier. It is guaranteed to \
have been generated from the `requirement`.
:returns: Indication whether the `candidate` is a viable \
solution to the `requirement`.
"""
# NOTE: Only allow pre-release candidates if we want pre-releases
# NOTE: or the req ver was an exact match with the pre-release
# NOTE: version. Another case where we'd want to allow
# NOTE: pre-releases is when there are several user requirements
# NOTE: and one of them is a pre-release that also matches a
# NOTE: transitive dependency of another requirement.
allow_pre_release = self._with_pre_releases or not (
requirement.ver == '*' or
requirement.ver.startswith('<') or
requirement.ver.startswith('>') or
requirement.ver.startswith('!=')
) or self._is_user_requested(candidate)
if is_pre_release(candidate.ver) and not allow_pre_release:
return False
# NOTE: This is a set of Pipenv-inspired optimizations. Ref:
# https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
if (
requirement.is_virtual or
candidate.is_virtual or
requirement.ver == '*'
):
return True
return meets_requirements(
version=candidate.ver,
requirements=requirement.ver,
)
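# Illustration (added note, not upstream code): with the checks above, a
# candidate version '2.1.0-beta.1' satisfies the exact requirement
# '2.1.0-beta.1' (exact pins admit pre-releases), but is rejected for a
# range such as '>=2.0.0' unless pre-releases were explicitly enabled.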
def get_dependencies(self, candidate):
# type: (Candidate) -> List[Candidate]
r"""Get direct dependencies of a candidate.
:returns: A collection of requirements that `candidate` \
specifies as its dependencies.
"""
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
# NOTE: The underlying implementation currently uses the first one found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
# NOTE: internally it populates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
| gpl-3.0 |
yfried/ansible | lib/ansible/modules/cloud/amazon/ec2_eni_facts.py | 17 | 9488 | #!/usr/bin/python
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eni_facts
short_description: Gather facts about ec2 ENI interfaces in AWS
description:
- Gather facts about ec2 ENI interfaces in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
requirements: [ boto3 ]
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all ENIs
- ec2_eni_facts:
# Gather facts about a particular ENI
- ec2_eni_facts:
filters:
network-interface-id: eni-xxxxxxx
'''
RETURN = '''
network_interfaces:
description: List of matching elastic network interfaces
returned: always
type: complex
contains:
association:
description: Info of associated elastic IP (EIP)
returned: always, empty dict if no association exists
type: dict
sample: {
allocation_id: "eipalloc-5sdf123",
association_id: "eipassoc-8sdf123",
ip_owner_id: "4415120123456",
public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
public_ip: "52.1.0.63"
}
attachment:
description: Info about the attached EC2 instance
returned: always, empty dict if ENI is not attached
type: dict
sample: {
attach_time: "2017-08-05T15:25:47+00:00",
attachment_id: "eni-attach-149d21234",
delete_on_termination: false,
device_index: 1,
instance_id: "i-15b8d3cadbafa1234",
instance_owner_id: "4415120123456",
status: "attached"
}
availability_zone:
description: Availability zone of ENI
returned: always
type: string
sample: "us-east-1b"
description:
description: Description text for ENI
returned: always
type: string
sample: "My favourite network interface"
groups:
description: List of attached security groups
returned: always
type: list
sample: [
{
group_id: "sg-26d0f1234",
group_name: "my_ec2_security_group"
}
]
id:
description: The id of the ENI (alias for network_interface_id)
returned: always
type: string
sample: "eni-392fsdf"
interface_type:
description: Type of the network interface
returned: always
type: string
sample: "interface"
ipv6_addresses:
description: List of IPv6 addresses for this interface
returned: always
type: list
sample: []
mac_address:
description: MAC address of the network interface
returned: always
type: string
sample: "0a:f8:10:2f:ab:a1"
network_interface_id:
description: The id of the ENI
returned: always
type: string
sample: "eni-392fsdf"
owner_id:
description: AWS account id of the owner of the ENI
returned: always
type: string
sample: "4415120123456"
private_dns_name:
description: Private DNS name for the ENI
returned: always
type: string
sample: "ip-172-16-1-180.ec2.internal"
private_ip_address:
description: Private IP address for the ENI
returned: always
type: string
sample: "172.16.1.180"
private_ip_addresses:
description: List of private IP addresses attached to the ENI
returned: always
type: list
sample: []
requester_id:
description: The ID of the entity that launched the ENI
returned: always
type: string
sample: "AIDAIONYVJQNIAZFT3ABC"
requester_managed:
description: Indicates whether the network interface is being managed by an AWS service.
returned: always
type: bool
sample: false
source_dest_check:
description: Indicates whether the network interface performs source/destination checking.
returned: always
type: bool
sample: false
status:
description: Indicates if the network interface is attached to an instance or not
returned: always
type: string
sample: "in-use"
subnet_id:
description: Subnet ID the ENI is in
returned: always
type: string
sample: "subnet-7bbf01234"
tag_set:
description: Dictionary of tags added to the ENI
returned: always
type: dict
sample: {}
vpc_id:
description: ID of the VPC the network interface it part of
returned: always
type: string
sample: "vpc-b3f1f123"
'''
try:
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_conn
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
def list_eni(connection, module):
if module.params.get("filters") is None:
filters = []
else:
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
try:
network_interfaces_result = connection.describe_network_interfaces(Filters=filters)['NetworkInterfaces']
except (ClientError, NoCredentialsError) as e:
module.fail_json(msg=e.message)
# Modify boto3 tags list to be ansible friendly dict and then camel_case
camel_network_interfaces = []
for network_interface in network_interfaces_result:
network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
# Added id to interface info to be compatible with return values of ec2_eni module:
network_interface['Id'] = network_interface['NetworkInterfaceId']
camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface))
module.exit_json(network_interfaces=camel_network_interfaces)
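# Added sketch (not in the original module): the shape of the filter
# conversion used above. ansible_dict_to_boto3_filter_list turns an
# Ansible-style dict such as
#   {'network-interface-id': 'eni-xxxxxxx'}
# into the boto3 Filters structure
#   [{'Name': 'network-interface-id', 'Values': ['eni-xxxxxxx']}]
# (the value here is a placeholder echoing the EXAMPLES block).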
def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
'private_ip_addresses': private_addresses
}
if hasattr(interface, 'publicDnsName'):
interface_info['association'] = {'public_ip_address': interface.publicIp,
'public_dns_name': interface.publicDnsName,
'ip_owner_id': interface.ipOwnerId
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
list_eni(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
glensc/fedora-packager | src/fedora-cert.py | 1 | 2520 | #!/usr/bin/python
# fedora-cert - a command line tool to manage your fedora SSL user certificates
#
# Copyright (C) 2009-2010 Red Hat Inc.
# Author(s): Dennis Gilmore <dennis@ausil.us>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See http://www.gnu.org/copyleft/gpl.html for
# the full text of the license.
import optparse
import fedora_cert
import urlgrabber
import sys
def main(opts):
# Let's read in the existing cert if it exists;
# it gets us the existing account info.
if not opts.username:
try:
username = fedora_cert.read_user_cert()
except:
print "Can't determine fas name, lets get a new cert"
try:
fedora_cert.create_user_cert(None)
except fedora_cert.fedora_cert_error, e:
print e
sys.exit(1)
sys.exit(0)
else:
username = opts.username
# Has the cert expired? Do we force a new cert? Get a new one.
if opts.newcert:
print "Getting a new User Certificate"
try:
fedora_cert.create_user_cert(username)
except fedora_cert.fedora_cert_error, e:
print e
sys.exit(1)
sys.exit(0)
if fedora_cert.certificate_expired():
print "Certificate has expired, getting a new one"
try:
fedora_cert.create_user_cert(username)
except fedora_cert.fedora_cert_error, e:
print e
sys.exit(1)
sys.exit(0)
if opts.verifycert:
print "Verifying Certificate"
try:
fedora_cert.verify_cert()
except fedora_cert.fedora_cert_error, e:
print e
sys.exit(1)
print "CRL Checking not implemented yet"
if __name__ == '__main__':
opt_p = optparse.OptionParser(usage="%prog [OPTIONS] ")
opt_p.add_option('-u', '--username', action='store', dest='username',
default=False, help="FAS Username.")
opt_p.add_option('-n', '--new-cert', action='store_true', dest='newcert',
default=False, help="Generate a new Fedora Certificate.")
opt_p.add_option('-v', '--verify-cert', action='store_true', dest='verifycert',
default=False, help="Verify Certificate.")
(opts, args) = opt_p.parse_args()
main(opts)
| gpl-3.0 |
thaim/ansible | lib/ansible/plugins/cliconf/voss.py | 31 | 9777 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: voss
short_description: Use voss cliconf to run command on Extreme VOSS platform
description:
- This voss plugin provides low level abstraction apis for
sending and receiving CLI commands from Extreme VOSS network devices.
version_added: "2.7"
"""
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.voss.voss import VossNetworkConfig
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
@enable_mode
def get_config(self, source='running', flags=None, format=None):
if source not in ('running', 'startup'):
raise ValueError("fetching configuration from %s is not supported" % source)
if format:
raise ValueError("'format' value %s is not supported for get_config" % format)
if not flags:
flags = []
if source == 'running':
cmd = 'show running-config '
cmd += ' '.join(to_list(flags))
cmd = cmd.strip()
else:
cmd = 'more /intflash/config.cfg'
return self.send_command(cmd)
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
"""
Generate diff between candidate and running configuration. If the
remote host supports onbox diff capabilities (i.e. supports_onbox_diff),
the candidate and running configurations are not required to be passed
as arguments. If onbox diff capability is not supported, the candidate
argument is mandatory and the running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration diff in json format.
{
'config_diff': '',
}
"""
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations['supports_generate_diff']:
raise ValueError("candidate configuration is required to generate diff")
if diff_match not in option_values['diff_match']:
raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
if diff_replace not in option_values['diff_replace']:
raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
# prepare candidate configuration
candidate_obj = VossNetworkConfig(indent=0, ignore_lines=diff_ignore_lines)
candidate_obj.load(candidate)
if running and diff_match != 'none':
# running configuration
running_obj = VossNetworkConfig(indent=0, contents=running, ignore_lines=diff_ignore_lines)
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
else:
configdiffobjs = candidate_obj.items
diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
diff['diff_path'] = path
diff['diff_replace'] = diff_replace
return diff
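# Usage sketch (added note, not upstream code): a typical call, with
# illustrative arguments --
#   diff = cliconf.get_diff(candidate='prompt "lab"', running=running_cfg,
#                           diff_match='line', diff_replace='line')
# diff['config_diff'] holds the 'prompt "lab"' line when it is missing
# from running_cfg, and '' when the running config already matches.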
@enable_mode
def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(operations, candidate, commit, replace, comment)
results = []
requests = []
if commit:
self.send_command('configure terminal')
for line in to_list(candidate):
if not isinstance(line, Mapping):
line = {'command': line}
cmd = line['command']
if cmd != 'end' and cmd[0] != '!':
results.append(self.send_command(**line))
requests.append(cmd)
self.send_command('end')
else:
raise ValueError('check mode is not supported')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'voss'
reply = self.get(command='show sys-info')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'SysDescr\s+: \S+ \((\S+)\)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'Chassis\s+: (\S+)', data)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'SysName\s+: (\S+)', data)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_device_operations(self):
return {
'supports_diff_replace': True,
'supports_commit': False,
'supports_rollback': False,
'supports_defaults': True,
'supports_onbox_diff': False,
'supports_commit_comment': False,
'supports_multiline_delimiter': False,
'supports_diff_match': True,
'supports_diff_ignore_lines': True,
'supports_generate_diff': True,
'supports_replace': False
}
def get_option_values(self):
return {
'format': ['text'],
'diff_match': ['line', 'strict', 'exact', 'none'],
'diff_replace': ['line', 'block'],
'output': []
}
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result['rpc'] += ['get_diff', 'run_commands', 'get_defaults_flag']
result['device_operations'] = self.get_device_operations()
result.update(self.get_option_values())
return json.dumps(result)
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {'command': cmd}
output = cmd.pop('output', None)
if output:
raise ValueError("'output' value %s is not supported for run_commands" % output)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc:
raise
out = getattr(e, 'err', e)
responses.append(out)
return responses
def get_defaults_flag(self):
return 'verbose'
| mit |
ujdhesa/unisubs | apps/thirdpartyaccounts/migrations/0005_fix_thumb_options.py | 5 | 20982 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'pay_rate_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 15, 15, 58, 2, 546893)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 15, 15, 58, 2, 546824)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}),
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'notify_interval': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'null': 'True', 'thumb_sizes': '((288, 162), (120, 90))', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'thirdpartyaccounts.facebookaccount': {
'Meta': {'object_name': 'FacebookAccount'},
'avatar': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'thirdpartyaccounts.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'meta_1_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_1_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_2_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_2_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_3_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_3_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
'primary_audio_language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_sizes': '((288, 162), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['thirdpartyaccounts']
| agpl-3.0 |
sugarlabs/sugar | src/jarabe/journal/expandedentry.py | 1 | 20472 | # Copyright (C) 2007, One Laptop Per Child
# Copyright (C) 2008-2013, Sugar Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from gettext import gettext as _
import time
import os
from gi.repository import GObject
from gi.repository import GLib
from gi.repository import Gtk
import json
from sugar3.graphics import style
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics.icon import CanvasIcon, get_icon_file_name
from sugar3.graphics.icon import Icon, CellRendererIcon
from sugar3.graphics.alert import Alert
from sugar3.util import format_size
from sugar3.graphics.objectchooser import get_preview_pixbuf
from sugar3.activity.activity import PREVIEW_SIZE
from jarabe.journal.keepicon import KeepIcon
from jarabe.journal.palettes import ObjectPalette, BuddyPalette
from jarabe.journal import misc
from jarabe.journal import model
from jarabe.journal import journalwindow
class Separator(Gtk.VBox):
def __init__(self, orientation):
Gtk.VBox.__init__(
self, background_color=style.COLOR_PANEL_GREY.get_gdk_color())
class BuddyList(Gtk.Alignment):
def __init__(self, buddies):
Gtk.Alignment.__init__(self)
self.set(0, 0, 0, 0)
hbox = Gtk.HBox()
for buddy in buddies:
nick_, color = buddy
icon = CanvasIcon(icon_name='computer-xo',
xo_color=XoColor(color),
pixel_size=style.STANDARD_ICON_SIZE)
icon.set_palette(BuddyPalette(buddy))
hbox.pack_start(icon, True, True, 0)
self.add(hbox)
class TextView(Gtk.TextView):
def __init__(self):
Gtk.TextView.__init__(self)
text_buffer = Gtk.TextBuffer()
self.set_buffer(text_buffer)
self.set_left_margin(style.DEFAULT_PADDING)
self.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
class CommentsView(Gtk.TreeView):
__gsignals__ = {
'comments-changed': (GObject.SignalFlags.RUN_FIRST, None, ([str])),
'clicked': (GObject.SignalFlags.RUN_FIRST, None, [object]),
}
FROM = 'from'
MESSAGE = 'message'
ICON = 'icon'
ICON_COLOR = 'icon-color'
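    # A comment list is stored in the entry metadata as JSON using the
    # keys above, e.g. (illustrative values):
    #   [{"from": "nick", "message": "Nice work!",
    #     "icon": "computer-xo", "icon-color": "#FFFFFF,#000000"}]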
COMMENT_ICON = 0
COMMENT_ICON_COLOR = 1
COMMENT_FROM = 2
COMMENT_MESSAGE = 3
COMMENT_ERASE_ICON = 4
COMMENT_ERASE_ICON_COLOR = 5
def __init__(self):
Gtk.TreeView.__init__(self)
self.set_headers_visible(False)
self._store = Gtk.ListStore(str, object, str, str, str, object)
self._comments = []
self._init_model()
def update_comments(self, comments):
self._store.clear()
if comments:
self._comments = json.loads(comments)
for comment in self._comments:
self._add_row(comment.get(self.FROM, ''),
comment.get(self.MESSAGE, ''),
comment.get(self.ICON, 'computer-xo'),
comment.get(self.ICON_COLOR, '#FFFFFF,#000000'))
def _get_selected_row(self):
selection = self.get_selection()
return selection.get_selected()
def _add_row(self, sender, message, icon_name, icon_color):
self._store.append((get_icon_file_name(icon_name),
XoColor(icon_color),
sender,
message,
get_icon_file_name('list-remove'),
XoColor('#FFFFFF,#000000')))
def _init_model(self):
self.set_model(self._store)
col = Gtk.TreeViewColumn()
who_icon = CellRendererCommentIcon()
col.pack_start(who_icon, False)
col.add_attribute(who_icon, 'file-name', self.COMMENT_ICON)
col.add_attribute(who_icon, 'xo-color', self.COMMENT_ICON_COLOR)
who_text = Gtk.CellRendererText()
col.pack_start(who_text, True)
col.add_attribute(who_text, 'text', self.COMMENT_FROM)
comment_text = Gtk.CellRendererText()
col.pack_start(comment_text, True)
col.add_attribute(comment_text, 'text', self.COMMENT_MESSAGE)
erase_icon = CellRendererCommentIcon()
erase_icon.connect('clicked', self._erase_comment_cb)
col.pack_start(erase_icon, False)
col.add_attribute(erase_icon, 'file-name', self.COMMENT_ERASE_ICON)
col.add_attribute(
erase_icon, 'xo-color', self.COMMENT_ERASE_ICON_COLOR)
self.append_column(col)
def _erase_comment_cb(self, widget, event):
alert = Alert()
entry = self.get_selection().get_selected()[1]
erase_string = _('Erase')
alert.props.title = erase_string
alert.props.msg = _('Do you want to permanently erase \"%s\"?') \
% self._store[entry][self.COMMENT_MESSAGE]
icon = Icon(icon_name='dialog-cancel')
alert.add_button(Gtk.ResponseType.CANCEL, _('Cancel'), icon)
icon.show()
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, erase_string, ok_icon)
ok_icon.show()
alert.connect('response', self._erase_alert_response_cb, entry)
journalwindow.get_journal_window().add_alert(alert)
alert.show()
def _erase_alert_response_cb(self, alert, response_id, entry):
journalwindow.get_journal_window().remove_alert(alert)
if response_id is Gtk.ResponseType.OK:
self._store.remove(entry)
# Regenerate comments from current contents of store
self._comments = []
for entry in self._store:
self._comments.append({
self.FROM: entry[self.COMMENT_FROM],
self.MESSAGE: entry[self.COMMENT_MESSAGE],
self.ICON: entry[self.COMMENT_ICON],
self.ICON_COLOR: '[%s]' % (
entry[self.COMMENT_ICON_COLOR].to_string()),
})
self.emit('comments-changed', json.dumps(self._comments))
class CellRendererCommentIcon(CellRendererIcon):
def __init__(self):
CellRendererIcon.__init__(self)
self.props.width = style.SMALL_ICON_SIZE
self.props.height = style.SMALL_ICON_SIZE
self.props.size = style.SMALL_ICON_SIZE
self.props.stroke_color = style.COLOR_BUTTON_GREY.get_svg()
self.props.fill_color = style.COLOR_BLACK.get_svg()
self.props.mode = Gtk.CellRendererMode.ACTIVATABLE
class BaseExpandedEntry(GObject.GObject):
def __init__(self):
# Create a header
self._keep_icon = None
self._keep_sid = None
self._icon = None
self._icon_box = None
self._title = None
self._date = None
def create_header(self):
header = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
self._keep_icon = self._create_keep_icon()
header.pack_start(self._keep_icon, False, False, style.DEFAULT_SPACING)
self._icon_box = Gtk.HBox()
header.pack_start(self._icon_box, False, False, style.DEFAULT_SPACING)
self._title = self._create_title()
header.pack_start(self._title, True, True, 0)
# TODO: create a version list popup instead of a date label
self._date = self._create_date()
header.pack_start(self._date, False, False, style.DEFAULT_SPACING)
return header
def _create_keep_icon(self):
keep_icon = KeepIcon()
return keep_icon
def _create_title(self):
entry = Gtk.Entry()
return entry
def _create_date(self):
date = Gtk.Label()
return date
class ExpandedEntry(Gtk.EventBox, BaseExpandedEntry):
def __init__(self, journalactivity):
BaseExpandedEntry.__init__(self)
self._journalactivity = journalactivity
Gtk.EventBox.__init__(self)
self._vbox = Gtk.VBox()
self.add(self._vbox)
self._metadata = None
self._update_title_sid = None
self.modify_bg(Gtk.StateType.NORMAL, style.COLOR_WHITE.get_gdk_color())
self._header = self.create_header()
self._vbox.pack_start(self._header, False, False,
style.DEFAULT_SPACING * 2)
self._keep_sid = self._keep_icon.connect(
'toggled', self._keep_icon_toggled_cb)
self._title.connect(
'focus-out-event', self._title_focus_out_event_cb)
if Gtk.Widget.get_default_direction() == Gtk.TextDirection.RTL:
# Reverse header children.
for child in self._header.get_children():
self._header.reorder_child(child, 0)
# Create a two-column body
body_box = Gtk.EventBox()
body_box.set_border_width(style.DEFAULT_SPACING)
body_box.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
self._vbox.pack_start(body_box, True, True, 0)
body = Gtk.HBox()
body_box.add(body)
first_column = Gtk.VBox()
body.pack_start(first_column, False, False, style.DEFAULT_SPACING)
second_column = Gtk.VBox()
body.pack_start(second_column, True, True, 0)
# First body column
self._preview_box = Gtk.Frame()
style_context = self._preview_box.get_style_context()
style_context.add_class('journal-preview-box')
first_column.pack_start(self._preview_box, False, True, 0)
self._technical_box = Gtk.VBox()
first_column.pack_start(self._technical_box, False, False, 0)
# Second body column
description_box, self._description = self._create_description()
second_column.pack_start(description_box, True, True,
style.DEFAULT_SPACING)
tags_box, self._tags = self._create_tags()
second_column.pack_start(tags_box, True, True,
style.DEFAULT_SPACING)
comments_box, self._comments = self._create_comments()
second_column.pack_start(comments_box, True, True,
style.DEFAULT_SPACING)
self._buddy_list = Gtk.VBox()
second_column.pack_start(self._buddy_list, True, False, 0)
self.show_all()
def set_metadata(self, metadata):
if self._metadata == metadata:
return
self._metadata = metadata
self._keep_icon.handler_block(self._keep_sid)
self._keep_icon.set_active(int(metadata.get('keep', 0)) == 1)
self._keep_icon.handler_unblock(self._keep_sid)
self._icon = self._create_icon()
for child in self._icon_box.get_children():
self._icon_box.remove(child)
# FIXME: self._icon_box.foreach(self._icon_box.remove)
self._icon_box.pack_start(self._icon, False, False, 0)
self._date.set_text(misc.get_date(metadata))
self._title.set_text(metadata.get('title', _('Untitled')))
if self._preview_box.get_child():
self._preview_box.remove(self._preview_box.get_child())
self._preview_box.add(self._create_preview())
for child in self._technical_box.get_children():
self._technical_box.remove(child)
# FIXME: self._technical_box.foreach(self._technical_box.remove)
self._technical_box.pack_start(self._create_technical(),
False, False, style.DEFAULT_SPACING)
for child in self._buddy_list.get_children():
self._buddy_list.remove(child)
# FIXME: self._buddy_list.foreach(self._buddy_list.remove)
self._buddy_list.pack_start(self._create_buddy_list(), False, False,
style.DEFAULT_SPACING)
description = metadata.get('description', '')
self._description.get_buffer().set_text(description)
tags = metadata.get('tags', '')
self._tags.get_buffer().set_text(tags)
comments = metadata.get('comments', '')
self._comments.update_comments(comments)
def _create_icon(self):
icon = CanvasIcon(file_name=misc.get_icon_name(self._metadata))
icon.connect_after('activate', self.__icon_activate_cb)
if misc.is_activity_bundle(self._metadata):
xo_color = XoColor('%s,%s' % (style.COLOR_BUTTON_GREY.get_svg(),
style.COLOR_TRANSPARENT.get_svg()))
else:
xo_color = misc.get_icon_color(self._metadata)
icon.props.xo_color = xo_color
icon.set_palette(ObjectPalette(self._journalactivity, self._metadata))
return icon
def _create_preview(self):
box = Gtk.EventBox()
box.modify_bg(Gtk.StateType.NORMAL, style.COLOR_WHITE.get_gdk_color())
metadata = self._metadata
pixbuf = get_preview_pixbuf(metadata.get('preview', ''))
has_preview = pixbuf is not None
if has_preview:
im = Gtk.Image()
im.set_from_pixbuf(pixbuf)
box.add(im)
im.show()
else:
label = Gtk.Label()
label.set_text(_('No preview'))
width, height = PREVIEW_SIZE[0], PREVIEW_SIZE[1]
label.set_size_request(width, height)
box.add(label)
label.show()
box.connect_after('button-release-event',
self._preview_box_button_release_event_cb)
return box
def _create_technical(self):
vbox = Gtk.VBox()
vbox.props.spacing = style.DEFAULT_SPACING
if 'filesize' in self._metadata:
filesize = self._metadata['filesize']
else:
filesize = model.get_file_size(self._metadata['uid'])
lines = [
_('Kind: %s') % (self._metadata.get('mime_type') or _('Unknown'),),
_('Date: %s') % (self._format_date(),),
_('Size: %s') % (format_size(int(filesize)))
]
for line in lines:
linebox = Gtk.HBox()
vbox.pack_start(linebox, False, False, 0)
text = Gtk.Label()
text.set_markup('<span foreground="%s">%s</span>' % (
style.COLOR_BUTTON_GREY.get_html(), line))
linebox.pack_start(text, False, False, 0)
return vbox
def _format_date(self):
if 'timestamp' in self._metadata:
try:
timestamp = float(self._metadata['timestamp'])
except (ValueError, TypeError):
logging.warning('Invalid timestamp for %r: %r',
self._metadata['uid'],
self._metadata['timestamp'])
else:
return time.strftime('%x', time.localtime(timestamp))
return _('No date')
def _create_buddy_list(self):
vbox = Gtk.VBox()
vbox.props.spacing = style.DEFAULT_SPACING
text = Gtk.Label()
text.set_markup('<span foreground="%s">%s</span>' % (
style.COLOR_BUTTON_GREY.get_html(), _('Participants:')))
halign = Gtk.Alignment.new(0, 0, 0, 0)
halign.add(text)
vbox.pack_start(halign, False, False, 0)
if self._metadata.get('buddies'):
buddies = list(json.loads(self._metadata['buddies']).values())
vbox.pack_start(BuddyList(buddies), False, False, 0)
        return vbox
def _create_scrollable(self, widget, label=None):
vbox = Gtk.VBox()
vbox.props.spacing = style.DEFAULT_SPACING
if label is not None:
text = Gtk.Label()
text.set_markup('<span foreground="%s">%s</span>' % (
style.COLOR_BUTTON_GREY.get_html(), label))
halign = Gtk.Alignment.new(0, 0, 0, 0)
halign.add(text)
vbox.pack_start(halign, False, False, 0)
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
scrolled_window.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
scrolled_window.add(widget)
vbox.pack_start(scrolled_window, True, True, 0)
return vbox
def _create_description(self):
widget = TextView()
widget.connect('focus-out-event',
self._description_tags_focus_out_event_cb)
return self._create_scrollable(widget, label=_('Description:')), widget
def _create_tags(self):
widget = TextView()
widget.connect('focus-out-event',
self._description_tags_focus_out_event_cb)
return self._create_scrollable(widget, label=_('Tags:')), widget
def _create_comments(self):
widget = CommentsView()
widget.connect('comments-changed', self._comments_changed_cb)
return self._create_scrollable(widget, label=_('Comments:')), widget
def _title_notify_text_cb(self, entry, pspec):
if not self._update_title_sid:
self._update_title_sid = \
GLib.timeout_add_seconds(1,
self._update_title_cb)
def _title_focus_out_event_cb(self, entry, event):
self._update_entry()
def _description_tags_focus_out_event_cb(self, text_view, event):
self._update_entry()
def _comments_changed_cb(self, event, comments):
self._metadata['comments'] = comments
self._write_entry()
def _update_entry(self, needs_update=False):
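        # |needs_update| lets callers such as _keep_icon_toggled_cb force a
        # write even when none of the text fields changed.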
if not model.is_editable(self._metadata):
return
old_title = self._metadata.get('title', None)
new_title = self._title.get_text()
if old_title != new_title:
self._icon.palette.props.primary_text = new_title
self._metadata['title'] = new_title
self._metadata['title_set_by_user'] = '1'
needs_update = True
bounds = self._tags.get_buffer().get_bounds()
old_tags = self._metadata.get('tags', None)
new_tags = self._tags.get_buffer().get_text(bounds[0], bounds[1],
include_hidden_chars=False)
if old_tags != new_tags:
self._metadata['tags'] = new_tags
needs_update = True
bounds = self._description.get_buffer().get_bounds()
old_description = self._metadata.get('description', None)
new_description = self._description.get_buffer().get_text(
bounds[0], bounds[1], include_hidden_chars=False)
if old_description != new_description:
self._metadata['description'] = new_description
needs_update = True
if needs_update:
self._write_entry()
self._update_title_sid = None
def _write_entry(self):
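        # Entries in the local datastore (mountpoint '/') are written
        # directly; entries on external media are plain files, so the
        # current file path is passed along for model.write() to update
        # in place.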
if self._metadata.get('mountpoint', '/') == '/':
model.write(self._metadata, update_mtime=False)
else:
old_file_path = os.path.join(
self._metadata['mountpoint'],
model.get_file_name(self._metadata['title'],
self._metadata['mime_type']))
model.write(self._metadata, file_path=old_file_path,
update_mtime=False)
def _keep_icon_toggled_cb(self, keep_icon):
if keep_icon.get_active():
self._metadata['keep'] = '1'
else:
self._metadata['keep'] = '0'
self._update_entry(needs_update=True)
def __icon_activate_cb(self, button):
misc.resume(self._metadata,
alert_window=journalwindow.get_journal_window())
return True
def _preview_box_button_release_event_cb(self, button, event):
logging.debug('_preview_box_button_release_event_cb')
misc.resume(self._metadata,
alert_window=journalwindow.get_journal_window())
return True
| gpl-3.0 |
winklerand/pandas | asv_bench/benchmarks/frame_ctor.py | 1 | 4201 | import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range
try:
from pandas.tseries import offsets
except:
from pandas.core.datetools import * # noqa
from .pandas_vb_common import setup # noqa
class FromDicts(object):
goal_time = 0.2
def setup(self):
N, K = 5000, 50
index = tm.makeStringIndex(N)
columns = tm.makeStringIndex(K)
frame = DataFrame(np.random.randn(N, K), index=index, columns=columns)
self.data = frame.to_dict()
self.some_dict = list(self.data.values())[0]
self.dict_list = frame.to_dict(orient='records')
self.data2 = {i: {j: float(j) for j in range(100)}
for i in range(2000)}
def time_frame_ctor_list_of_dict(self):
DataFrame(self.dict_list)
def time_frame_ctor_nested_dict(self):
DataFrame(self.data)
def time_series_ctor_from_dict(self):
Series(self.some_dict)
def time_frame_ctor_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
DataFrame(self.data2)
class FromSeries(object):
goal_time = 0.2
def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
self.s = Series(np.random.randn(10000), index=mi)
def time_frame_from_mi_series(self):
DataFrame(self.s)
# ----------------------------------------------------------------------
# From dict with DatetimeIndex with all offsets
# dynamically generate benchmarks for every offset
#
# get_period_count & get_index_for_offset are there because blindly taking each
# offset times 1000 can easily go out of Timestamp bounds and raise errors.
def get_period_count(start_date, off):
ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
if (ten_offsets_in_days == 0):
return 1000
else:
periods = 9 * (Timestamp.max - start_date).days // ten_offsets_in_days
return min(periods, 1000)
def get_index_for_offset(off):
start_date = Timestamp('1/1/1900')
return date_range(start_date,
periods=get_period_count(start_date, off),
freq=off)
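# For example (illustrative): get_index_for_offset(offsets.Day()) returns a
# DatetimeIndex of 1000 daily periods starting 1900-01-01, since ten Days
# stay far inside the Timestamp.max horizon.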
all_offsets = offsets.__all__
# extra cases
for off in ['FY5253', 'FY5253Quarter']:
all_offsets.pop(all_offsets.index(off))
all_offsets.extend([off + '_1', off + '_2'])
class FromDictwithTimestampOffsets(object):
params = [all_offsets, [1, 2]]
param_names = ['offset', 'n_steps']
offset_kwargs = {'WeekOfMonth': {'weekday': 1, 'week': 1},
'LastWeekOfMonth': {'weekday': 1, 'week': 1},
'FY5253': {'startingMonth': 1, 'weekday': 1},
'FY5253Quarter': {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 1}}
offset_extra_cases = {'FY5253': {'variation': ['nearest', 'last']},
'FY5253Quarter': {'variation': ['nearest', 'last']}}
def setup(self, offset, n_steps):
np.random.seed(1234)
extra = False
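        # Names like 'FY5253_1' were generated at module level above;
        # endswith('_', None, -1) checks for the '_' just before the
        # trailing case number.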
if offset.endswith("_", None, -1):
extra = int(offset[-1])
offset = offset[:-2]
kwargs = {}
if offset in self.offset_kwargs:
kwargs = self.offset_kwargs[offset]
if extra:
extras = self.offset_extra_cases[offset]
for extra_arg in extras:
kwargs[extra_arg] = extras[extra_arg][extra - 1]
offset = getattr(offsets, offset)
self.idx = get_index_for_offset(offset(n_steps, **kwargs))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = self.df.to_dict()
def time_frame_ctor(self, offset, n_steps):
DataFrame(self.d)
class FromRecords(object):
goal_time = 0.2
params = [None, 1000]
param_names = ['nrows']
def setup(self, nrows):
N = 100000
self.gen = ((x, (x * 20), (x * 100)) for x in range(N))
def time_frame_from_records_generator(self, nrows):
# issue-6700
self.df = DataFrame.from_records(self.gen, nrows=nrows)
| bsd-3-clause |
elelsee/pycfn-elasticsearch | pycfn_elasticsearch/vendored/botocore/docs/params.py | 4 | 8380 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.shape import ShapeDocumenter
from botocore.docs.utils import py_type_name
class BaseParamsDocumenter(ShapeDocumenter):
def document_params(self, section, shape, include=None, exclude=None):
"""Fills out the documentation for a section given a model shape.
:param section: The section to write the documentation to.
:param shape: The shape of the operation.
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
history = []
self.traverse_and_document_shape(
section=section, shape=shape, history=history,
name=None, include=include, exclude=exclude)
def document_recursive_shape(self, section, shape, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
def document_shape_default(self, section, shape, history, include=None,
exclude=None, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
def document_shape_type_list(self, section, shape, history, include=None,
exclude=None, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
param_shape = shape.member
param_section = section.add_new_section(
param_shape.name, context={'shape': shape.member.name})
self._start_nested_param(param_section)
self.traverse_and_document_shape(
section=param_section, shape=param_shape,
history=history, name=None)
section = section.add_new_section('end-list')
self._end_nested_param(section)
def document_shape_type_map(self, section, shape, history, include=None,
exclude=None, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
key_section = section.add_new_section(
'key', context={'shape': shape.key.name})
self._start_nested_param(key_section)
self._add_member_documentation(key_section, shape.key)
param_section = section.add_new_section(
shape.value.name, context={'shape': shape.value.name})
param_section.style.indent()
self._start_nested_param(param_section)
self.traverse_and_document_shape(
section=param_section, shape=shape.value,
history=history, name=None)
end_section = section.add_new_section('end-map')
self._end_nested_param(end_section)
self._end_nested_param(end_section)
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None,
name=None, **kwargs):
members = self._add_members_to_shape(shape.members, include)
self._add_member_documentation(section, shape, name=name)
for param in members:
if exclude and param in exclude:
continue
param_shape = members[param]
param_section = section.add_new_section(
param, context={'shape': param_shape.name})
self._start_nested_param(param_section)
self.traverse_and_document_shape(
section=param_section, shape=param_shape,
history=history, name=param)
section = section.add_new_section('end-structure')
self._end_nested_param(section)
def _add_member_documentation(self, section, shape, **kwargs):
pass
def _add_members_to_shape(self, members, include):
if include:
members = members.copy()
for param in include:
members[param.name] = param
return members
def _start_nested_param(self, section):
section.style.indent()
section.style.new_line()
def _end_nested_param(self, section):
section.style.dedent()
section.style.new_line()
class ResponseParamsDocumenter(BaseParamsDocumenter):
"""Generates the description for the response parameters"""
EVENT_NAME = 'response-params'
def _add_member_documentation(self, section, shape, name=None, **kwargs):
py_type = py_type_name(shape.type_name)
name_section = section.add_new_section('param-name')
name_section.write('- ')
if name is not None:
name_section.style.bold('%s ' % name)
type_section = section.add_new_section('param-type')
type_section.style.italics('(%s) -- ' % py_type)
documentation_section = section.add_new_section('param-documentation')
if shape.documentation:
documentation_section.style.indent()
documentation_section.include_doc_string(shape.documentation)
section.style.new_paragraph()
class RequestParamsDocumenter(BaseParamsDocumenter):
"""Generates the description for the request parameters"""
EVENT_NAME = 'request-params'
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None, **kwargs):
if len(history) > 1:
self._add_member_documentation(section, shape, **kwargs)
section.style.indent()
members = self._add_members_to_shape(shape.members, include)
for i, param in enumerate(members):
if exclude and param in exclude:
continue
param_shape = members[param]
param_section = section.add_new_section(
param, context={'shape': param_shape.name})
param_section.style.new_line()
is_required = param in shape.required_members
self.traverse_and_document_shape(
section=param_section, shape=param_shape,
history=history, name=param, is_required=is_required)
section = section.add_new_section('end-structure')
if len(history) > 1:
section.style.dedent()
section.style.new_line()
def _add_member_documentation(self, section, shape, name=None,
is_top_level_param=False, is_required=False,
**kwargs):
py_type = py_type_name(shape.type_name)
if is_top_level_param:
type_section = section.add_new_section('param-type')
type_section.write(':type %s: %s' % (name, py_type))
end_type_section = type_section.add_new_section('end-param-type')
end_type_section.style.new_line()
name_section = section.add_new_section('param-name')
name_section.write(':param %s: ' % name)
else:
name_section = section.add_new_section('param-name')
name_section.write('- ')
if name is not None:
name_section.style.bold('%s ' % name)
type_section = section.add_new_section('param-type')
type_section.style.italics('(%s) -- ' % py_type)
if is_required:
is_required_section = section.add_new_section('is-required')
is_required_section.style.indent()
is_required_section.style.bold('[REQUIRED] ')
if shape.documentation:
documentation_section = section.add_new_section(
'param-documentation')
documentation_section.style.indent()
documentation_section.include_doc_string(shape.documentation)
end_param_section = section.add_new_section('end-param')
end_param_section.style.new_paragraph()
| apache-2.0 |
madoodia/codeLab | python/modules_os.py | 1 | 10636 | # ===============================================
# MODULE STUDY: os
import os
reload(os)
print '*----------------------------------------*'
path = 'C:/' # nt
path = '/' # linux
print os.stat(path)
print '*----------------------------------------*'
print os.error # <type 'exceptions.OSError'>
print os.error() # OSError()
print '*----------------------------------------*'
print os.name # 'posix', 'nt', 'os2', 'ce', 'java', 'riscos'
# you can see platform module too and sys.platform
################################ Process Parameters ################################
print '*----------------------------------------*'
print os.environ # A mapping object representing the string environment
print sorted(os.environ)
print os.environ['HOMEPATH']
print os.environ['PATH']
print os.environ['WINDIR']
print os.environ['USER']
print os.environ['NUMBER_OF_PROCESSORS']
print os.environ['MAYA_LOCATION']
print os.environ['PROCESSOR_ARCHITECTURE']
print os.environ['HOME']
print os.environ['USERNAME']
print os.environ['PYTHONPATH']
print os.environ['HOMEDRIVE']
print os.environ['MAYA_PLUG_IN_PATH']
print os.environ['OS']
for e in os.environ.keys():
    print e, ":", os.environ[e]
# we can access too many information of OS with os.environ
print '*----------------------------------------*'
print os.getcwd() # E:\Madoodia\_Python\_learning_python
new_path = 'D:/'
os.chdir(new_path)
print os.getcwd() # D:/
print os.getpid() # Return the current process id.
print os.getenv('USERNAME') # Return the value of the environment variable varname if it exists
print os.getenv('NOT_EXISTS')
# os.putenv(varname, value) # Set the environment variable named varname to the string value
# os.strerror(code) # Return the error message corresponding to the error code in code
# os.umask(mask) # Set the current numeric umask and return the previous umask.
# os.uname() # Availability: Unix
# os.unsetenv(varname) # Unset (delete) the environment variable named varname
################################ File Object Creation ################################
# os.fdopen(fd[, mode[, bufsize]]) # Return an open file object connected to the file descriptor fd
# os.popen(command[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
# os.tmpfile() # Return a new file object opened in update mode (w+b); deleted automatically once no descriptors refer to it
# os.popen2(cmd[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
# os.popen3(cmd[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
# os.popen4(cmd[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
################################ File Descriptor Operations ################################
# os.close(fd) # Close file descriptor fd.
# os.closerange(fd_low, fd_high) # Close all file descriptors from fd_low (inclusive) to fd_high (exclusive), ignoring errors
# os.dup(fd) # Return a duplicate of file descriptor fd.
# os.dup2(fd, fd2) # Duplicate file descriptor fd to fd2
# os.fstat(fd) # Return status for file descriptor fd, like stat().
# os.fsync(fd) # Force write of file with file descriptor fd to disk
# os.isatty(fd) # Return True if the file descriptor fd is open and connected to a tty(-like) device, else False.
# os.lseek(fd, pos, how) # Set the current position of file descriptor fd to position pos, modified by how: SEEK_SET or 0
# os.open(file, flags[, mode]) # Open the file file and set various flags according to flags
# os.pipe() # Create a pipe. Return a pair of file descriptors (r, w)
# os.read(fd, n) # Read at most n bytes from file descriptor fd
# os.write(fd, str) # Write the string str to file descriptor fd
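# A minimal runnable sketch of the raw descriptor calls, using os.pipe()
# so no real file is needed:
r, w = os.pipe()
os.write(w, 'hello')
os.close(w)
print os.read(r, 100) # hello
os.close(r)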
################################ Files and Directories ################################
# os.access(path, mode) # Use the real uid/gid to test for access to path
# os.chdir(path) # Change the current working directory to path.
# os.getcwd() # Return a string representing the current working directory.
# os.getcwdu() # Return a Unicode object representing the current working directory.
# os.chmod(path, mode) # Change the mode of path to the numeric mode
# os.listdir(path) # Return a list containing the names of the entries in the directory given by path
# os.lstat(path) # Perform the equivalent of an lstat() system call on the given path
# os.mkdir(path[, mode]) # Create a directory named path with numeric mode mode
# os.makedirs(path[, mode]) # Recursive directory creation function.
# Like mkdir(), but makes all intermediate-level directories needed to contain the leaf directory
# os.remove(path) # Remove (delete) the file path. If path is a directory, OSError is raised; see rmdir() below to remove a directory
# os.removedirs(path) # Remove directories recursively. Works like rmdir() except that, if the leaf directory is successfully removed
# os.rename(src, dst) # Rename the file or directory src to dst. If dst is a directory, OSError will be raised
# os.renames(old, new) # Recursive directory or file renaming function
# os.rmdir(path) # Remove (delete) the directory path. Only works when the directory is empty, otherwise, OSError is raised
# os.stat(path) # Perform the equivalent of a stat() system call on the given path
# os.utime(path, times) # Set the access and modified times of the file specified by path
# Generate the file names in a directory tree by walking the tree either top-down or bottom-up
# os.walk(top, topdown=True, onerror=None, followlinks=False)
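# A minimal sketch (assumes the current directory is readable): print every
# file beneath it with os.walk.
for dirpath, dirnames, filenames in os.walk('.'):
    for name in filenames:
        print os.path.join(dirpath, name)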
################################ Process Management ################################
# os.abort() # Generate a SIGABRT signal to the current process
# os._exit(n) # Exit the process with status n # The standard way to exit is sys.exit(n)
# os.startfile(path[, operation]) # Start a file with its associated application.
# The subprocess module provides more powerful facilities for spawning new processes and retrieving their results
# os.system(command) # Execute the command (a string) in a subshell
# os.times() # Return a 5-tuple of floating point numbers indicating accumulated (processor or other) times, in seconds
################################ Miscellaneous System Information ################################
# os.curdir # The constant string used by the operating system to refer to the current directory
# os.pardir # The constant string used by the operating system to refer to the parent directory
# os.sep # The character used by the operating system to separate pathname components
# os.altsep # An alternative character used by the operating system to separate pathname components
# os.extsep # The character which separates the base filename from the extension
# os.pathsep # The character conventionally used by the operating system to separate search path components
# os.defpath # The default search path used by exec*p* and spawn*p* if the environment doesn't have a 'PATH' key
# os.linesep # The string used to separate (or, rather, terminate) lines on the current platform.
################################ Miscellaneous Functions ################################
# os.urandom(n) # Return a string of n random bytes suitable for cryptographic use
# ****************************************** os.path *********************************************** #
# This module implements some useful functions on pathnames.
# To read or write files see open(), and for accessing the filesystem see the os module.
path = 'C:/Python27/Lib/site-packages/sip.pyd'
# os.path.abspath(path) # Return a normalized absolutized version of the pathname path
# os.path.basename(path) # Return the base name of pathname path
# os.path.commonprefix(list) # Return the longest path prefix that is a prefix of all paths in list
# os.path.dirname(path) # Return the directory name of pathname path
# os.path.exists(path) # Return True if path refers to an existing path
# os.path.lexists(path) # Return True if path refers to an existing path
# os.path.expanduser(path) # On Unix and Windows, return the argument with an initial component of ~ or ~user replaced by that user's home directory.
# os.path.expandvars(path) # Return the argument with environment variables expanded.
# os.path.getatime(path) # Return the time of last access of path
# os.path.getmtime(path) # Return the time of last modification of path
# os.path.getctime(path) # Return the system's ctime which, on some systems (like Unix) is the time of the last metadata change, and, on others (like Windows), is the creation time for path
# os.path.getsize(path) # Return the size, in bytes, of path
# os.path.isabs(path) # Return True if path is an absolute pathname
# os.path.isfile(path) # Return True if path is an existing regular file
# os.path.isdir(path) # Return True if path is an existing directory
# os.path.islink(path) # Return True if path refers to a directory entry that is a symbolic link
# os.path.join(path1[, path2[, ...]]) # Join one or more path components intelligently
# os.path.normcase(path) # Normalize the case of a pathname
# os.path.normpath(path) # Normalize a pathname by collapsing redundant separators and up-level references
# os.path.realpath(path) # Return the canonical path of the specified filename
# os.path.relpath(path[, start]) # Return a relative filepath to path either from the current directory or from an optional start directory.
# os.path.samefile(path1, path2) # Return True if both pathname arguments refer to the same file or directory
# os.path.split(path) # Split the pathname path into a pair, (head, tail) where tail is the last pathname component and head is everything leading up to that
# os.path.splitdrive(path) # Split the pathname path into a pair (drive, tail) where drive is either a drive specification or the empty string.
# os.path.splitext(path) # Split the pathname path into a pair (root, ext) such that root + ext == path
# os.path.splitunc(path) # Split the pathname path into a pair (unc, rest) so that unc is the UNC mount point (such as r'\\host\mount'),
# os.path.walk(path, visit, arg) # Calls the function visit with arguments (arg, dirname, names) for each directory in the directory tree rooted at path
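# A minimal sketch reusing the sample path assigned above:
print os.path.dirname(path) # C:/Python27/Lib/site-packages
print os.path.basename(path) # sip.pyd
print os.path.splitext(path) # ('C:/Python27/Lib/site-packages/sip', '.pyd')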
| mit |
jinzo27/infoGrabr | lib/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/cvf.py | 34 | 2514 | """engine.SCons.Tool.cvf
Tool-specific initialization for the Compaq Visual Fortran compiler.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/cvf.py 5023 2010/06/14 22:05:46 scons"
import fortran
compilers = ['f90']
def generate(env):
"""Add Builders and construction variables for compaq visual fortran to an Environment."""
fortran.generate(env)
env['FORTRAN'] = 'f90'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['OBJSUFFIX'] = '.obj'
env['FORTRANMODDIR'] = '${TARGET.dir}'
env['FORTRANMODDIRPREFIX'] = '/module:'
env['FORTRANMODDIRSUFFIX'] = ''
def exists(env):
return env.Detect(compilers)
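# Typical use (illustrative): Environment(tools=['cvf']) calls generate()
# above to populate the Fortran construction variables for this compiler.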
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
marado/youtube-dl | test/test_jsinterp.py | 104 | 3483 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.jsinterp import JSInterpreter
class TestJSInterpreter(unittest.TestCase):
def test_basic(self):
jsi = JSInterpreter('function x(){;}')
self.assertEqual(jsi.call_function('x'), None)
jsi = JSInterpreter('function x3(){return 42;}')
self.assertEqual(jsi.call_function('x3'), 42)
def test_calc(self):
jsi = JSInterpreter('function x4(a){return 2*a+1;}')
self.assertEqual(jsi.call_function('x4', 3), 7)
def test_empty_return(self):
jsi = JSInterpreter('function f(){return; y()}')
self.assertEqual(jsi.call_function('f'), None)
def test_morespace(self):
jsi = JSInterpreter('function x (a) { return 2 * a + 1 ; }')
self.assertEqual(jsi.call_function('x', 3), 7)
jsi = JSInterpreter('function f () { x = 2 ; return x; }')
self.assertEqual(jsi.call_function('f'), 2)
def test_strange_chars(self):
jsi = JSInterpreter('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }')
self.assertEqual(jsi.call_function('$_xY1', 20), 21)
def test_operators(self):
jsi = JSInterpreter('function f(){return 1 << 5;}')
self.assertEqual(jsi.call_function('f'), 32)
jsi = JSInterpreter('function f(){return 19 & 21;}')
self.assertEqual(jsi.call_function('f'), 17)
jsi = JSInterpreter('function f(){return 11 >> 2;}')
self.assertEqual(jsi.call_function('f'), 2)
def test_array_access(self):
jsi = JSInterpreter('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2] = 7; return x;}')
self.assertEqual(jsi.call_function('f'), [5, 2, 7])
def test_parens(self):
jsi = JSInterpreter('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}')
self.assertEqual(jsi.call_function('f'), 7)
jsi = JSInterpreter('function f(){return (1 + 2) * 3;}')
self.assertEqual(jsi.call_function('f'), 9)
def test_assignments(self):
jsi = JSInterpreter('function f(){var x = 20; x = 30 + 1; return x;}')
self.assertEqual(jsi.call_function('f'), 31)
jsi = JSInterpreter('function f(){var x = 20; x += 30 + 1; return x;}')
self.assertEqual(jsi.call_function('f'), 51)
jsi = JSInterpreter('function f(){var x = 20; x -= 30 + 1; return x;}')
self.assertEqual(jsi.call_function('f'), -11)
def test_comments(self):
'Skipping: Not yet fully implemented'
return
jsi = JSInterpreter('''
function x() {
var x = /* 1 + */ 2;
var y = /* 30
* 40 */ 50;
return x + y;
}
''')
self.assertEqual(jsi.call_function('x'), 52)
jsi = JSInterpreter('''
function f() {
var x = "/*";
var y = 1 /* comment */ + 2;
return y;
}
''')
self.assertEqual(jsi.call_function('f'), 3)
def test_precedence(self):
jsi = JSInterpreter('''
function x() {
var a = [10, 20, 30, 40, 50];
var b = 6;
a[0]=a[b%a.length];
return a;
}''')
self.assertEqual(jsi.call_function('x'), [20, 20, 30, 40, 50])
if __name__ == '__main__':
unittest.main()
| unlicense |
andyparkins/mediarename | mediarename.py | 1 | 9035 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# Project: mediarename
#
# Version Control
# $Author$
# $Date$
# $Id$
#
# Legal
# Copyright 2010 Andy Parkins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Notes
# apt-get install python-tagpy
#
# ----------------------------------------------------------------------------
# ----- Includes
# Standard library
import sys
import os
import shutil
import subprocess
import locale
import time
import re
import codecs
from optparse import OptionParser
# Additional
from xml.dom import minidom
import tagpy
# ----- Constants
# ----- Class definitions
#
# Class:
# Description:
#
class Record:
pass
#
# Class:
# Description:
#
class TMediaRenameError(Exception):
pass
#
# Class: TMediaRename
# Description:
#
class TMediaRename:
#
# Function: __init__
# Description:
#
def __init__( self, argv ):
self.argv = argv
# Load the options record with default values
self.options = Record()
self.options.tracknums = False
self.options.includealbum = False
self.options.pathprefix = ''
self.options.mode = 'default-mode'
#
# Function: run
# Description:
#
def run( self ):
self.readCommandLine()
if self.options.verbose:
print >> sys.stderr, "mediarename: --- Verbose mode active"
print >> sys.stderr, self
if self.options.mode == 'default-mode':
print >> sys.stderr, "mediarename: --- Command line mode"
filemap = dict()
for file in self.positionalparameters:
filemap[file] = self.processFile(file)
if filemap[file] is None:
continue
if self.options.verbose:
print filemap[file]
try:
self.renameWithPathCreate( file, filemap[file] )
except Exception, e:
print >> sys.stderr, "mediarename: ERROR creating target:",e.args[0]
raise
elif self.options.mode == 'm3u':
print >> sys.stderr, "mediarename: --- Playlist mode"
filemap = dict()
for playlistfile in self.positionalparameters:
f = open(playlistfile, 'r')
n = 1
for file in f:
# Skip comments
if file[0] == '#':
continue
file = file.rstrip()
file = os.path.join(os.path.dirname(playlistfile), file)
filemap[file] = self.processFile(file, n)
if filemap[file] is None:
continue
n = n + 1
if self.options.verbose:
print filemap[file]
try:
self.renameWithPathCreate( file, filemap[file] )
except Exception, e:
print >> sys.stderr, "mediarename: ERROR creating target:",e.args[0]
raise
elif self.options.mode == 'testnormalise':
for teststring in self.positionalparameters:
print self.normalise( teststring )
#
# Function: renameWithPathCreate
# Description:
#
def renameWithPathCreate( self, fromfile, tofile ):
newdirname = os.path.dirname( tofile )
if not os.path.exists( newdirname ):
if self.options.verbose or self.options.dryrun:
print "mediarename: Creating directory '%s'" % (newdirname)
if not self.options.dryrun:
os.makedirs( newdirname )
if os.path.exists( tofile ):
print "mediarename: skipping existing target,", tofile
return
if self.options.move:
print "mediarename: '%s' -> '%s'" % (fromfile, tofile)
if not self.options.dryrun:
os.rename( fromfile, tofile )
else:
print "mediarename: '%s' => '%s'" % (fromfile, tofile)
if not self.options.dryrun:
shutil.copy2( fromfile, tofile )
#
# Function: processFile
# Description:
#
def processFile( self, file, n = None ):
if not os.path.exists( file ):
print >> sys.stderr, "mediarename: skipping non-existent file", file
return None
basename, extension = os.path.splitext(file)
try:
f = tagpy.FileRef( file )
tag = f.tag()
except ValueError, e:
print >> sys.stderr, "mediarename: skipping unsupported file type", file
return None
artist = self.normalise(tag.artist)
title = self.normalise(tag.title)
album = self.normalise(tag.album)
track = tag.track
year = tag.year
        if not track and n is not None:
            # tagpy reports 0 (not None) for a missing track number
track = n
newname = self.createNewName(artist, album, track, title, extension, year)
if self.options.verbose:
print "%s/%s/%d/%s -> " % (tag.artist,tag.album,tag.track,tag.title),
return newname
#
# Function: createNewName
# Description:
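    #   Build the target path from the tag data. With --albums and
    #   --tracknums the result is shaped like (illustrative values):
    #   PREFIX/Artist/2005_Album/003_Title.mp3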
#
def createNewName( self, artist, album, track, title, extension, year ):
formatString = []
variableList = []
if self.options.pathprefix:
variableList.append( self.options.pathprefix )
formatString.append( "%s/" )
variableList.append( artist )
formatString.append( "%s/" )
if self.options.includealbum:
            if self.options.tracknums and year:
                # tagpy reports 0 (not None) when the year tag is missing
variableList.append(year)
formatString.append("%04.4d_")
variableList.append(album)
formatString.append("%s/")
if self.options.tracknums:
variableList.append(track)
formatString.append("%03.3d_")
variableList.append(title)
formatString.append("%s")
variableList.append(extension)
formatString.append("%s")
return ''.join(formatString) % tuple( variableList )
#
# Function: normalise
# Description:
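    #   Collapse a tag string into a compact CamelCase token, e.g.
    #   (computed from the rules below):
    #   normalise('The Beatles - Help!') -> 'BeatlesHelpThe'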
#
def normalise( self, name ):
# newname = re.sub(r'\[(.+?)\]: *<(.+)>', r'[\1]: \2', newname )
newname = name
# Separators to spaces
newname = re.sub(r'[ \(\),;:|\.!\?\-_\\\/]+', r' ', newname )
# Contract punctuation
newname = re.sub(r'[\'"]+', r'', newname )
# Ampersand
newname = re.sub(r'&', r'And', newname )
# Titlecase
newname = newname.title()
# Move "the" to the end
newname = re.sub(r'^The (.+)', r'\1The', newname )
# Strip remaining spaces
newname = re.sub(r' ', r'', newname )
return newname
#
# Function: readCommandLine
# Description:
# Parse the command line with OptionParser; which supplies all the
# niceties for us (like --help, --version and validating the inputs)
#
def readCommandLine( self ):
# Configure parser
parser = OptionParser(
usage="usage: %prog [options] <file> <file> ...",
version="%prog 1.0")
# "-h", "--help" supplied automatically by OptionParser
parser.add_option( "-v", "--verbose", dest="verbose",
action="store_true",
help="show verbose output")
parser.add_option( "-p", "--path", dest="pathprefix",
metavar="PATH", type='string', default=self.options.pathprefix,
help="use PATH rather than the current directory as target")
parser.add_option( "-y", "--m3u", dest="mode",
action="store_const", const="m3u",
help="files are m3u playlist files" )
parser.add_option( "-n", "--tracknums", dest="tracknums",
action="store_true",
help="Prefix track names with the track number")
parser.add_option( "-a", "--albums", dest="includealbum",
action="store_true",
help="Include the album name as part of the path")
parser.add_option( "-d", "--dry-run", dest="dryrun",
action="store_true",
help="Don't actually change anything, implies verbose")
parser.add_option( "-m", "--move", dest="move",
action="store_true",
help="Move instead of copy")
parser.add_option( "", "--test", dest="mode",
action="store_const", const="testnormalise",
help="Test the normalisation patterns")
parser.set_defaults(mode=self.options.mode, \
tracknums=self.options.tracknums, \
includealbum=self.options.includealbum
)
# Run the parser
(self.options, args) = parser.parse_args( self.argv[1:] )
# Copy the positional arguments into self
self.positionalparameters = args
#
# Function: __str__
# Description:
# Dump the contents of this class to a string
#
def __str__( self ) :
        s = repr(self) + "\n"
for var in self.__dict__ :
s = s + " - " + var + " = " + str(self.__dict__[var]) + "\n"
return s
# ----- Main
#
# Function: main
# Description:
#
def main( argv = None ):
# Default arguments from command line
if argv is None:
argv = sys.argv
# Locale
    locale.setlocale( locale.LC_ALL, '' )
app = TMediaRename( argv )
# --- Begin
try:
app.run()
# Simply display TMediaRenameErrors
except TMediaRenameError, e:
print >> sys.stderr, "mediarename: ERROR:",e.args[0]
except KeyboardInterrupt, e:
print >> sys.stderr, "mediarename: Aborted from keyboard"
# ----- Module check
#
# __name__ is set to "__main__" when this is the top module
# if this module is loaded because of an "import" then this
# won't get run -- perfect
if __name__ == "__main__":
sys.exit( main() )
| gpl-3.0 |
cuixin/ssdb | deps/cpy/antlr3/streams.py | 98 | 43606 | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import codecs
from StringIO import StringIO
from antlr3.constants import DEFAULT_CHANNEL, EOF
from antlr3.tokens import Token, EOF_TOKEN
############################################################################
#
# basic interfaces
# IntStream
# +- CharStream
# \- TokenStream
#
# subclasses must implemented all methods
#
############################################################################
class IntStream(object):
"""
@brief Base interface for streams of integer values.
A simple stream of integers used when all I care about is the char
or token type sequence (such as interpretation).
"""
def consume(self):
raise NotImplementedError
def LA(self, i):
"""Get int at current input pointer + i ahead where i=1 is next int.
Negative indexes are allowed. LA(-1) is previous token (token
just matched). LA(-i) where i is before first token should
yield -1, invalid char / EOF.
"""
raise NotImplementedError
def mark(self):
"""
Tell the stream to start buffering if it hasn't already. Return
current input position, index(), or some other marker so that
when passed to rewind() you get back to the same spot.
rewind(mark()) should not affect the input cursor. The Lexer
track line/col info as well as input index so its markers are
not pure input indexes. Same for tree node streams.
"""
raise NotImplementedError
def index(self):
"""
Return the current input symbol index 0..n where n indicates the
last symbol has been read. The index is the symbol about to be
read not the most recently read symbol.
"""
raise NotImplementedError
def rewind(self, marker=None):
"""
Reset the stream so that next call to index would return marker.
The marker will usually be index() but it doesn't have to be. It's
just a marker to indicate what state the stream was in. This is
essentially calling release() and seek(). If there are markers
created after this marker argument, this routine must unroll them
like a stack. Assume the state the stream was in when this marker
was created.
If marker is None:
Rewind to the input position of the last marker.
Used currently only after a cyclic DFA and just
before starting a sem/syn predicate to get the
input position back to the start of the decision.
Do not "pop" the marker off the state. mark(i)
and rewind(i) should balance still. It is
like invoking rewind(last marker) but it should not "pop"
the marker off. It's like seek(last marker's input position).
"""
raise NotImplementedError
def release(self, marker=None):
"""
You may want to commit to a backtrack but don't want to force the
stream to keep bookkeeping objects around for a marker that is
no longer necessary. This will have the same behavior as
rewind() except it releases resources without the backward seek.
This must throw away resources for all markers back to the marker
argument. So if you're nested 5 levels of mark(), and then release(2)
you have to release resources for depths 2..5.
"""
raise NotImplementedError
def seek(self, index):
"""
Set the input cursor to the position indicated by index. This is
normally used to seek ahead in the input stream. No buffering is
required to do this unless you know your stream will use seek to
move backwards such as when backtracking.
This is different from rewind in its multi-directional
requirement and in that its argument is strictly an input cursor
(index).
For char streams, seeking forward must update the stream state such
as line number. For seeking backwards, you will be presumably
backtracking using the mark/rewind mechanism that restores state and
so this method does not need to update state when seeking backwards.
Currently, this method is only used for efficient backtracking using
memoization, but in the future it may be used for incremental parsing.
The index is 0..n-1. A seek to position i means that LA(1) will
return the ith symbol. So, seeking to 0 means LA(1) will return the
first element in the stream.
"""
raise NotImplementedError
def size(self):
"""
Only makes sense for streams that buffer everything up probably, but
might be useful to display the entire stream or for testing. This
value includes a single EOF.
"""
raise NotImplementedError
def getSourceName(self):
"""
Where are you getting symbols from? Normally, implementations will
pass the buck all the way to the lexer who can ask its input stream
for the file name or whatever.
"""
raise NotImplementedError
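# Illustrative sketch (not part of the original module) of the
# mark()/rewind() contract documented above, using the ANTLRStringStream
# implementation defined further below; the input text is hypothetical:
#
#   stream = ANTLRStringStream(u'ab\ncd')
#   marker = stream.mark()   # remember index, line and column
#   stream.consume()         # 'a'
#   stream.consume()         # 'b'
#   stream.consume()         # newline: line becomes 2, column 0
#   stream.rewind(marker)    # back at index 0, line 1, column 0
#
# rewind(mark()) leaves the input cursor where it was, as required.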
class CharStream(IntStream):
"""
@brief A source of characters for an ANTLR lexer.
This is an abstract class that must be implemented by a subclass.
"""
# pylint does not realize that this is an interface, too
#pylint: disable-msg=W0223
EOF = -1
def substring(self, start, stop):
"""
For infinite streams, you don't need this; primarily I'm providing
a useful interface for action code. Just make sure actions don't
use this on streams that don't support it.
"""
raise NotImplementedError
def LT(self, i):
"""
Get the ith character of lookahead. This is the same usually as
LA(i). This will be used for labels in the generated
lexer code. I'd prefer to return a char here type-wise, but it's
probably better to be 32-bit clean and be consistent with LA.
"""
raise NotImplementedError
def getLine(self):
"""ANTLR tracks the line information automatically"""
raise NotImplementedError
def setLine(self, line):
"""
Because this stream can rewind, we need to be able to reset the line
"""
raise NotImplementedError
def getCharPositionInLine(self):
"""
The index of the character relative to the beginning of the line 0..n-1
"""
raise NotImplementedError
def setCharPositionInLine(self, pos):
raise NotImplementedError
class TokenStream(IntStream):
"""
@brief A stream of tokens accessing tokens from a TokenSource
This is an abstract class that must be implemented by a subclass.
"""
# pylint does not realize that this is an interface, too
#pylint: disable-msg=W0223
def LT(self, k):
"""
Get Token at current input pointer + i ahead where i=1 is next Token.
i<0 indicates tokens in the past. So -1 is previous token and -2 is
two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken.
Return null for LT(0) and any index that results in an absolute address
that is negative.
"""
raise NotImplementedError
def get(self, i):
"""
Get a token at an absolute index i; 0..n-1. This is really only
needed for profiling and debugging and token stream rewriting.
If you don't want to buffer up tokens, then this method makes no
sense for you. Naturally you can't use the rewrite stream feature.
I believe DebugTokenStream can easily be altered to not use
this method, removing the dependency.
"""
raise NotImplementedError
def getTokenSource(self):
"""
Where is this stream pulling tokens from? This is not the name, but
the object that provides Token objects.
"""
raise NotImplementedError
def toString(self, start=None, stop=None):
"""
Return the text of all tokens from start to stop, inclusive.
If the stream does not buffer all the tokens then it can just
return "" or null; Users should not access $ruleLabel.text in
an action of course in that case.
Because the user is not required to use a token with an index stored
in it, we must provide a means for two token objects themselves to
indicate the start/end location. Most often this will just delegate
to the other toString(int,int). This is also parallel with
the TreeNodeStream.toString(Object,Object).
"""
raise NotImplementedError
############################################################################
#
# character streams for use in lexers
# CharStream
# \- ANTLRStringStream
#
############################################################################
class ANTLRStringStream(CharStream):
"""
@brief CharStream that pull data from a unicode string.
A pretty quick CharStream that pulls all data from an array
directly. Every method call counts in the lexer.
"""
def __init__(self, data):
"""
@param data This should be a unicode string holding the data you want
to parse. If you pass in a byte string, the Lexer will choke on
non-ascii data.
"""
CharStream.__init__(self)
# The data being scanned
self.strdata = unicode(data)
self.data = [ord(c) for c in self.strdata]
# How many characters are actually in the buffer
self.n = len(data)
# 0..n-1 index into string of next char
self.p = 0
# line number 1..n within the input
self.line = 1
# The index of the character relative to the beginning of the
# line 0..n-1
self.charPositionInLine = 0
# A list of CharStreamState objects that tracks the stream state
# values line, charPositionInLine, and p that can change as you
# move through the input stream. Indexed from 0..markDepth-1.
self._markers = [ ]
self.lastMarker = None
self.markDepth = 0
# What is name or source of this char stream?
self.name = None
def reset(self):
"""
Reset the stream so that it's in the same state it was
when the object was created *except* the data array is not
touched.
"""
self.p = 0
self.line = 1
self.charPositionInLine = 0
self._markers = [ ]
def consume(self):
try:
if self.data[self.p] == 10: # \n
self.line += 1
self.charPositionInLine = 0
else:
self.charPositionInLine += 1
self.p += 1
except IndexError:
            # happens when we have reached EOF and self.data[self.p] fails;
# just do nothing
pass
def LA(self, i):
if i == 0:
return 0 # undefined
if i < 0:
i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
try:
return self.data[self.p+i-1]
except IndexError:
return EOF
def LT(self, i):
if i == 0:
return 0 # undefined
if i < 0:
            i += 1 # e.g., translate LT(-1) to use offset i=0; then strdata[p+0-1]
try:
return self.strdata[self.p+i-1]
except IndexError:
return EOF
def index(self):
"""
Return the current input symbol index 0..n where n indicates the
last symbol has been read. The index is the index of char to
be returned from LA(1).
"""
return self.p
def size(self):
return self.n
def mark(self):
state = (self.p, self.line, self.charPositionInLine)
try:
self._markers[self.markDepth] = state
except IndexError:
self._markers.append(state)
self.markDepth += 1
self.lastMarker = self.markDepth
return self.lastMarker
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
p, line, charPositionInLine = self._markers[marker-1]
self.seek(p)
self.line = line
self.charPositionInLine = charPositionInLine
self.release(marker)
def release(self, marker=None):
if marker is None:
marker = self.lastMarker
self.markDepth = marker-1
def seek(self, index):
"""
consume() ahead until p==index; can't just set p=index as we must
update line and charPositionInLine.
"""
if index <= self.p:
self.p = index # just jump; don't update stream state (line, ...)
return
# seek forward, consume until p hits index
while self.p < index:
self.consume()
def substring(self, start, stop):
return self.strdata[start:stop+1]
def getLine(self):
"""Using setter/getter methods is deprecated. Use o.line instead."""
return self.line
def getCharPositionInLine(self):
"""
Using setter/getter methods is deprecated. Use o.charPositionInLine
instead.
"""
return self.charPositionInLine
def setLine(self, line):
"""Using setter/getter methods is deprecated. Use o.line instead."""
self.line = line
def setCharPositionInLine(self, pos):
"""
Using setter/getter methods is deprecated. Use o.charPositionInLine
instead.
"""
self.charPositionInLine = pos
def getSourceName(self):
return self.name
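# A minimal usage sketch for ANTLRStringStream (illustrative only, not
# part of the original module); the input text is hypothetical:
#
#   stream = ANTLRStringStream(u'hi')
#   stream.LA(1)            # ord('h') == 104
#   stream.LT(1)            # u'h'
#   stream.consume()
#   stream.LA(1)            # ord('i') == 105
#   stream.LA(2)            # EOF (-1), past the end of the data
#   stream.substring(0, 1)  # u'hi' -- the stop index is inclusive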
class ANTLRFileStream(ANTLRStringStream):
"""
@brief CharStream that opens a file to read the data.
This is a char buffer stream that is loaded from a file
all at once when you construct the object.
"""
def __init__(self, fileName, encoding=None):
"""
@param fileName The path to the file to be opened. The file will be
opened with mode 'rb'.
@param encoding If you set the optional encoding argument, then the
data will be decoded on the fly.
"""
self.fileName = fileName
fp = codecs.open(fileName, 'rb', encoding)
try:
data = fp.read()
finally:
fp.close()
ANTLRStringStream.__init__(self, data)
def getSourceName(self):
"""Deprecated, access o.fileName directly."""
return self.fileName
class ANTLRInputStream(ANTLRStringStream):
"""
@brief CharStream that reads data from a file-like object.
This is a char buffer stream that is loaded from a file like object
all at once when you construct the object.
All input is consumed from the file, but it is not closed.
"""
def __init__(self, file, encoding=None):
"""
@param file A file-like object holding your input. Only the read()
method must be implemented.
@param encoding If you set the optional encoding argument, then the
data will be decoded on the fly.
"""
if encoding is not None:
# wrap input in a decoding reader
reader = codecs.lookup(encoding)[2]
file = reader(file)
data = file.read()
ANTLRStringStream.__init__(self, data)
# I guess the ANTLR prefix exists only to avoid a name clash with some Java
# mumbojumbo. A plain "StringStream" looks better to me, which should be
# the preferred name in Python.
StringStream = ANTLRStringStream
FileStream = ANTLRFileStream
InputStream = ANTLRInputStream
############################################################################
#
# Token streams
# TokenStream
# +- CommonTokenStream
# \- TokenRewriteStream
#
############################################################################
class CommonTokenStream(TokenStream):
"""
@brief The most common stream of tokens
The most common stream of tokens is one where every token is buffered up
and tokens are prefiltered for a certain channel (the parser will only
see these tokens and cannot change the filter channel number during the
parse).
"""
def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
"""
@param tokenSource A TokenSource instance (usually a Lexer) to pull
the tokens from.
@param channel Skip tokens on any channel but this one; this is how we
skip whitespace...
"""
TokenStream.__init__(self)
self.tokenSource = tokenSource
# Record every single token pulled from the source so we can reproduce
# chunks of it later.
self.tokens = []
# Map<tokentype, channel> to override some Tokens' channel numbers
self.channelOverrideMap = {}
# Set<tokentype>; discard any tokens with this type
self.discardSet = set()
# Skip tokens on any channel but this one; this is how we skip whitespace...
self.channel = channel
# By default, track all incoming tokens
self.discardOffChannelTokens = False
# The index into the tokens list of the current token (next token
# to consume). p==-1 indicates that the tokens list is empty
self.p = -1
# Remember last marked position
self.lastMarker = None
def setTokenSource(self, tokenSource):
"""Reset this token stream by setting its token source."""
self.tokenSource = tokenSource
self.tokens = []
self.p = -1
self.channel = DEFAULT_CHANNEL
def reset(self):
self.p = 0
self.lastMarker = None
def fillBuffer(self):
"""
Load all tokens from the token source and put in tokens.
This is done upon first LT request because you might want to
set some token type / channel overrides before filling buffer.
"""
index = 0
t = self.tokenSource.nextToken()
while t is not None and t.type != EOF:
discard = False
if self.discardSet is not None and t.type in self.discardSet:
discard = True
elif self.discardOffChannelTokens and t.channel != self.channel:
discard = True
# is there a channel override for token type?
try:
overrideChannel = self.channelOverrideMap[t.type]
except KeyError:
# no override for this type
pass
else:
if overrideChannel == self.channel:
t.channel = overrideChannel
else:
discard = True
if not discard:
t.index = index
self.tokens.append(t)
index += 1
t = self.tokenSource.nextToken()
# leave p pointing at first token on channel
self.p = 0
self.p = self.skipOffTokenChannels(self.p)
def consume(self):
"""
Move the input pointer to the next incoming token. The stream
must become active with LT(1) available. consume() simply
moves the input pointer so that LT(1) points at the next
input symbol. Consume at least one token.
Walk past any token not on the channel the parser is listening to.
"""
if self.p < len(self.tokens):
self.p += 1
self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
def skipOffTokenChannels(self, i):
"""
Given a starting index, return the index of the first on-channel
token.
"""
try:
while self.tokens[i].channel != self.channel:
i += 1
except IndexError:
# hit the end of token stream
pass
return i
def skipOffTokenChannelsReverse(self, i):
while i >= 0 and self.tokens[i].channel != self.channel:
i -= 1
return i
def setTokenTypeChannel(self, ttype, channel):
"""
A simple filter mechanism whereby you can tell this token stream
to force all tokens of type ttype to be on channel. For example,
when interpreting, we cannot exec actions so we need to tell
the stream to force all WS and NEWLINE to be a different, ignored
channel.
"""
self.channelOverrideMap[ttype] = channel
def discardTokenType(self, ttype):
self.discardSet.add(ttype)
def getTokens(self, start=None, stop=None, types=None):
"""
Given a start and stop index, return a list of all tokens in
the token type set. Return None if no tokens were found. This
method looks at both on and off channel tokens.
"""
if self.p == -1:
self.fillBuffer()
if stop is None or stop >= len(self.tokens):
stop = len(self.tokens) - 1
        if start is None or start < 0:
start = 0
if start > stop:
return None
if isinstance(types, (int, long)):
# called with a single type, wrap into set
types = set([types])
filteredTokens = [
            token for token in self.tokens[start:stop+1]
if types is None or token.type in types
]
if len(filteredTokens) == 0:
return None
return filteredTokens
def LT(self, k):
"""
        Get the kth token from the current position 1..n where k=1 is the
first symbol of lookahead.
"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if k < 0:
return self.LB(-k)
i = self.p
n = 1
# find k good tokens
while n < k:
# skip off-channel tokens
i = self.skipOffTokenChannels(i+1) # leave p on valid token
n += 1
try:
return self.tokens[i]
except IndexError:
return EOF_TOKEN
def LB(self, k):
"""Look backwards k tokens on-channel tokens"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if self.p - k < 0:
return None
i = self.p
n = 1
# find k good tokens looking backwards
while n <= k:
# skip off-channel tokens
i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token
n += 1
if i < 0:
return None
return self.tokens[i]
def get(self, i):
"""
Return absolute token i; ignore which channel the tokens are on;
that is, count all tokens not just on-channel tokens.
"""
return self.tokens[i]
def LA(self, i):
return self.LT(i).type
def mark(self):
self.lastMarker = self.index()
return self.lastMarker
def release(self, marker=None):
# no resources to release
pass
def size(self):
return len(self.tokens)
def index(self):
return self.p
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
self.seek(marker)
def seek(self, index):
self.p = index
def getTokenSource(self):
return self.tokenSource
def getSourceName(self):
return self.tokenSource.getSourceName()
def toString(self, start=None, stop=None):
if self.p == -1:
self.fillBuffer()
if start is None:
start = 0
elif not isinstance(start, int):
start = start.index
if stop is None:
stop = len(self.tokens) - 1
elif not isinstance(stop, int):
stop = stop.index
if stop >= len(self.tokens):
stop = len(self.tokens) - 1
return ''.join([t.text for t in self.tokens[start:stop+1]])
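# Illustrative sketch (not part of the original module): CommonTokenStream
# only needs a token source exposing nextToken(), so a trivial stub is
# enough to exercise the buffering described above. The stub and the token
# list below are hypothetical; real code would use a generated Lexer.
#
#   class ListTokenSource(object):
#       def __init__(self, tokens):
#           self._iter = iter(tokens)
#       def nextToken(self):
#           return next(self._iter, EOF_TOKEN)
#
#   stream = CommonTokenStream(ListTokenSource(some_tokens))
#   stream.LT(1)   # fills the buffer lazily, skipping off-channel tokens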
class RewriteOperation(object):
"""@brief Internal helper class."""
def __init__(self, stream, index, text):
self.stream = stream
self.index = index
self.text = text
def execute(self, buf):
"""Execute the rewrite operation by possibly adding to the buffer.
Return the index of the next token to operate on.
"""
return self.index
def toString(self):
opName = self.__class__.__name__
return '<%s@%d:"%s">' % (opName, self.index, self.text)
__str__ = toString
__repr__ = toString
class InsertBeforeOp(RewriteOperation):
"""@brief Internal helper class."""
def execute(self, buf):
buf.write(self.text)
buf.write(self.stream.tokens[self.index].text)
return self.index + 1
class ReplaceOp(RewriteOperation):
"""
@brief Internal helper class.
I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
instructions.
"""
def __init__(self, stream, first, last, text):
RewriteOperation.__init__(self, stream, first, text)
self.lastIndex = last
def execute(self, buf):
if self.text is not None:
buf.write(self.text)
return self.lastIndex + 1
def toString(self):
return '<ReplaceOp@%d..%d:"%s">' % (
self.index, self.lastIndex, self.text)
__str__ = toString
__repr__ = toString
class DeleteOp(ReplaceOp):
"""
@brief Internal helper class.
"""
def __init__(self, stream, first, last):
ReplaceOp.__init__(self, stream, first, last, None)
def toString(self):
return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)
__str__ = toString
__repr__ = toString
class TokenRewriteStream(CommonTokenStream):
"""@brief CommonTokenStream that can be modified.
Useful for dumping out the input stream after doing some
augmentation or other manipulations.
You can insert stuff, replace, and delete chunks. Note that the
operations are done lazily--only if you convert the buffer to a
String. This is very efficient because you are not moving data around
all the time. As the buffer of tokens is converted to strings, the
toString() method(s) check to see if there is an operation at the
current index. If so, the operation is done and then normal String
rendering continues on the buffer. This is like having multiple Turing
machine instruction streams (programs) operating on a single input tape. :)
Since the operations are done lazily at toString-time, operations do not
screw up the token index values. That is, an insert operation at token
index i does not change the index values for tokens i+1..n-1.
Because operations never actually alter the buffer, you may always get
the original token stream back without undoing anything. Since
the instructions are queued up, you can easily simulate transactions and
roll back any changes if there is an error just by removing instructions.
For example,
CharStream input = new ANTLRFileStream("input");
TLexer lex = new TLexer(input);
TokenRewriteStream tokens = new TokenRewriteStream(lex);
T parser = new T(tokens);
parser.startRule();
Then in the rules, you can execute
Token t,u;
...
    input.insertAfter(t, "text to put after t");
    input.insertAfter(u, "text after u");
System.out.println(tokens.toString());
Actually, you have to cast the 'input' to a TokenRewriteStream. :(
You can also have multiple "instruction streams" and get multiple
rewrites from a single pass over the input. Just name the instruction
streams and use that name again when printing the buffer. This could be
useful for generating a C file and also its header file--all from the
same buffer:
tokens.insertAfter("pass1", t, "text to put after t");}
tokens.insertAfter("pass2", u, "text after u");}
System.out.println(tokens.toString("pass1"));
System.out.println(tokens.toString("pass2"));
If you don't use named rewrite streams, a "default" stream is used as
the first example shows.
"""
DEFAULT_PROGRAM_NAME = "default"
MIN_TOKEN_INDEX = 0
def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
CommonTokenStream.__init__(self, tokenSource, channel)
# You may have multiple, named streams of rewrite operations.
# I'm calling these things "programs."
# Maps String (name) -> rewrite (List)
self.programs = {}
self.programs[self.DEFAULT_PROGRAM_NAME] = []
# Map String (program name) -> Integer index
self.lastRewriteTokenIndexes = {}
def rollback(self, *args):
"""
Rollback the instruction stream for a program so that
the indicated instruction (via instructionIndex) is no
longer in the stream. UNTESTED!
"""
if len(args) == 2:
programName = args[0]
instructionIndex = args[1]
elif len(args) == 1:
programName = self.DEFAULT_PROGRAM_NAME
instructionIndex = args[0]
else:
raise TypeError("Invalid arguments")
p = self.programs.get(programName, None)
if p is not None:
self.programs[programName] = (
p[self.MIN_TOKEN_INDEX:instructionIndex])
def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
"""Reset the program so that no instructions exist"""
self.rollback(programName, self.MIN_TOKEN_INDEX)
def insertAfter(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
index = args[0]
text = args[1]
elif len(args) == 3:
programName = args[0]
index = args[1]
text = args[2]
else:
raise TypeError("Invalid arguments")
if isinstance(index, Token):
            # index is a Token, grab the stream index from it
index = index.index
# to insert after, just insert before next index (even if past end)
self.insertBefore(programName, index+1, text)
def insertBefore(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
index = args[0]
text = args[1]
elif len(args) == 3:
programName = args[0]
index = args[1]
text = args[2]
else:
raise TypeError("Invalid arguments")
if isinstance(index, Token):
            # index is a Token, grab the stream index from it
index = index.index
op = InsertBeforeOp(self, index, text)
rewrites = self.getProgram(programName)
rewrites.append(op)
def replace(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
first = args[0]
last = args[0]
text = args[1]
elif len(args) == 3:
programName = self.DEFAULT_PROGRAM_NAME
first = args[0]
last = args[1]
text = args[2]
elif len(args) == 4:
programName = args[0]
first = args[1]
last = args[2]
text = args[3]
else:
raise TypeError("Invalid arguments")
if isinstance(first, Token):
            # first is a Token, grab the stream index from it
first = first.index
if isinstance(last, Token):
            # last is a Token, grab the stream index from it
last = last.index
if first > last or first < 0 or last < 0 or last >= len(self.tokens):
raise ValueError(
"replace: range invalid: "+first+".."+last+
"(size="+len(self.tokens)+")")
op = ReplaceOp(self, first, last, text)
rewrites = self.getProgram(programName)
rewrites.append(op)
def delete(self, *args):
self.replace(*(list(args) + [None]))
def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
return self.lastRewriteTokenIndexes.get(programName, -1)
def setLastRewriteTokenIndex(self, programName, i):
self.lastRewriteTokenIndexes[programName] = i
def getProgram(self, name):
p = self.programs.get(name, None)
if p is None:
p = self.initializeProgram(name)
return p
def initializeProgram(self, name):
p = []
self.programs[name] = p
return p
def toOriginalString(self, start=None, end=None):
if start is None:
start = self.MIN_TOKEN_INDEX
if end is None:
end = self.size() - 1
buf = StringIO()
i = start
while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
buf.write(self.get(i).text)
i += 1
return buf.getvalue()
def toString(self, *args):
if len(args) == 0:
programName = self.DEFAULT_PROGRAM_NAME
start = self.MIN_TOKEN_INDEX
end = self.size() - 1
elif len(args) == 1:
programName = args[0]
start = self.MIN_TOKEN_INDEX
end = self.size() - 1
elif len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
start = args[0]
end = args[1]
if start is None:
start = self.MIN_TOKEN_INDEX
elif not isinstance(start, int):
start = start.index
if end is None:
end = len(self.tokens) - 1
elif not isinstance(end, int):
end = end.index
# ensure start/end are in range
if end >= len(self.tokens):
end = len(self.tokens) - 1
if start < 0:
start = 0
rewrites = self.programs.get(programName)
if rewrites is None or len(rewrites) == 0:
# no instructions to execute
return self.toOriginalString(start, end)
buf = StringIO()
# First, optimize instruction stream
indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
# Walk buffer, executing instructions and emitting tokens
i = start
while i <= end and i < len(self.tokens):
op = indexToOp.get(i)
# remove so any left have index size-1
try:
del indexToOp[i]
except KeyError:
pass
t = self.tokens[i]
if op is None:
# no operation at that index, just dump token
buf.write(t.text)
i += 1 # move to next token
else:
i = op.execute(buf) # execute operation and skip
# include stuff after end if it's last index in buffer
# So, if they did an insertAfter(lastValidIndex, "foo"), include
# foo if end==lastValidIndex.
if end == len(self.tokens) - 1:
# Scan any remaining operations after last token
# should be included (they will be inserts).
for i in sorted(indexToOp.keys()):
op = indexToOp[i]
if op.index >= len(self.tokens)-1:
buf.write(op.text)
return buf.getvalue()
__str__ = toString
def reduceToSingleOperationPerIndex(self, rewrites):
"""
We need to combine operations and report invalid operations (like
        overlapping replaces that are not completely nested). Inserts to
same index need to be combined etc... Here are the cases:
I.i.u I.j.v leave alone, nonoverlapping
I.i.u I.i.v combine: Iivu
R.i-j.u R.x-y.v | i-j in x-y delete first R
R.i-j.u R.i-j.v delete first R
R.i-j.u R.x-y.v | x-y in i-j ERROR
R.i-j.u R.x-y.v | boundaries overlap ERROR
I.i.u R.x-y.v | i in x-y delete I
I.i.u R.x-y.v | i not in x-y leave alone, nonoverlapping
R.x-y.v I.i.u | i in x-y ERROR
R.x-y.v I.x.u R.x-y.uv (combine, delete I)
R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
I.i.u = insert u before op @ index i
R.x-y.u = replace x-y indexed tokens with u
First we need to examine replaces. For any replace op:
1. wipe out any insertions before op within that range.
2. Drop any replace op before that is contained completely within
that range.
3. Throw exception upon boundary overlap with any previous replace.
Then we can deal with inserts:
1. for any inserts to same index, combine even if not adjacent.
2. for any prior replace with same left boundary, combine this
insert with replace and delete this replace.
3. throw exception if index in same range as previous replace
Don't actually delete; make op null in list. Easier to walk list.
Later we can throw as we add to index -> op map.
Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
inserted stuff would be before the replace range. But, if you
add tokens in front of a method body '{' and then delete the method
body, I think the stuff before the '{' you added should disappear too.
Return a map from token index to operation.
"""
# WALK REPLACES
for i, rop in enumerate(rewrites):
if rop is None:
continue
if not isinstance(rop, ReplaceOp):
continue
# Wipe prior inserts within range
for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
if iop.index >= rop.index and iop.index <= rop.lastIndex:
rewrites[j] = None # delete insert as it's a no-op.
# Drop any prior replaces contained within
for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
if (prevRop.index >= rop.index
and prevRop.lastIndex <= rop.lastIndex):
rewrites[j] = None # delete replace as it's a no-op.
continue
# throw exception unless disjoint or identical
disjoint = (prevRop.lastIndex < rop.index
or prevRop.index > rop.lastIndex)
same = (prevRop.index == rop.index
and prevRop.lastIndex == rop.lastIndex)
if not disjoint and not same:
raise ValueError(
"replace op boundaries of %s overlap with previous %s"
% (rop, prevRop))
# WALK INSERTS
for i, iop in enumerate(rewrites):
if iop is None:
continue
if not isinstance(iop, InsertBeforeOp):
continue
# combine current insert with prior if any at same index
for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
if prevIop.index == iop.index: # combine objects
# convert to strings...we're in process of toString'ing
# whole token buffer so no lazy eval issue with any
# templates
iop.text = self.catOpText(iop.text, prevIop.text)
rewrites[j] = None # delete redundant prior insert
# look for replaces where iop.index is in range; error
for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
if iop.index == rop.index:
rop.text = self.catOpText(iop.text, rop.text)
rewrites[i] = None # delete current insert
continue
if iop.index >= rop.index and iop.index <= rop.lastIndex:
raise ValueError(
"insert op %s within boundaries of previous %s"
% (iop, rop))
m = {}
for i, op in enumerate(rewrites):
if op is None:
continue # ignore deleted ops
assert op.index not in m, "should only be one op per index"
m[op.index] = op
return m
def catOpText(self, a, b):
x = ""
y = ""
if a is not None:
x = a
if b is not None:
y = b
return x + y
def getKindOfOps(self, rewrites, kind, before=None):
if before is None:
before = len(rewrites)
elif before > len(rewrites):
before = len(rewrites)
for i, op in enumerate(rewrites[:before]):
if op is None:
# ignore deleted
continue
if op.__class__ == kind:
yield i, op
def toDebugString(self, start=None, end=None):
if start is None:
start = self.MIN_TOKEN_INDEX
if end is None:
end = self.size() - 1
buf = StringIO()
i = start
while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
buf.write(self.get(i))
i += 1
return buf.getvalue()
| bsd-3-clause |
xenserver/xen-4.3 | tools/python/xen/web/resource.py | 52 | 3390 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
#============================================================================
import http
def findResource(resource, request):
"""Traverse resource tree to find who will handle the request."""
while request.postpath and not resource.isLeaf:
#print 'findResource:', resource, request.postpath
pathElement = request.postpath.pop(0)
request.prepath.append(pathElement)
next = resource.getPathResource(pathElement, request)
if not next: break
resource = next
return resource
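# Illustrative sketch (not part of the original module): building a small
# resource tree and resolving it against a request whose postpath is
# ['api', 'status'] (the request object is hypothetical; it only needs
# the prepath/postpath lists used above):
#
#   root = Resource()
#   api = Resource()
#   api.putChild('status', ErrorPage(http.NOT_IMPLEMENTED))
#   root.putChild('api', api)
#   leaf = findResource(root, request)   # walks root -> api -> ErrorPage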
class Resource:
isLeaf = False
def __init__(self):
self.children = {}
def getRequestResource(self, req):
return findResource(self, req)
def getChild(self, path, request):
return None
def getPathResource(self, path, request):
#print 'getPathResource>', self, path
if self.children.has_key(path):
val = self.children[path]
else:
val = self.getChild(path, request)
#print 'getPathResource<', val
return val
def putChild(self, path, child):
self.children[path] = child
#child.server = self.server
def render(self, req):
meth = getattr(self, 'render_' + req.getRequestMethod(), self.unsupported)
return meth(req)
def supportedMethods(self):
l = []
s = 'render_'
for x in dir(self):
if x.startswith(s):
l.append(x[len(s):])
return l
def render_HEAD(self, req):
return self.render_GET(req)
def render_GET(self, req):
req.setContentType("text/plain")
req.write("GET")
def render_POST(self, req):
req.setContentType("text/plain")
req.write("POST")
def unsupported(self, req):
req.setHeader("Accept", ",".join(self.supportedMethods()))
req.setResponseCode(http.NOT_IMPLEMENTED)
req.setContentType("text/plain")
req.write("Request method not supported (%s)" % req.getRequestMethod())
class ErrorPage(Resource):
isLeaf = True
def __init__(self, code, status=None, msg=None):
Resource.__init__(self)
if status is None:
status = http.getStatus(code)
if msg is None:
msg = status
self.code = code
self.status = status
self.msg = msg
def render(self, req):
req.setResponseCode(self.code, self.status)
req.setContentType("text/plain")
req.write(self.msg)
| gpl-2.0 |
HydrelioxGitHub/home-assistant | homeassistant/components/notify/html5.py | 3 | 17519 | """
HTML5 Push Messaging notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.html5/
"""
import datetime
import json
import logging
from functools import partial
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.util.json import load_json, save_json
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components import websocket_api
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TITLE, ATTR_TARGET, PLATFORM_SCHEMA, ATTR_TITLE_DEFAULT,
BaseNotificationService, DOMAIN)
from homeassistant.const import (
URL_ROOT, HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, HTTP_INTERNAL_SERVER_ERROR)
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
REQUIREMENTS = ['pywebpush==1.6.0']
DEPENDENCIES = ['frontend']
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = 'html5_push_registrations.conf'
SERVICE_DISMISS = 'html5_dismiss'
ATTR_GCM_SENDER_ID = 'gcm_sender_id'
ATTR_GCM_API_KEY = 'gcm_api_key'
ATTR_VAPID_PUB_KEY = 'vapid_pub_key'
ATTR_VAPID_PRV_KEY = 'vapid_prv_key'
ATTR_VAPID_EMAIL = 'vapid_email'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(ATTR_GCM_SENDER_ID): cv.string,
vol.Optional(ATTR_GCM_API_KEY): cv.string,
vol.Optional(ATTR_VAPID_PUB_KEY): cv.string,
vol.Optional(ATTR_VAPID_PRV_KEY): cv.string,
vol.Optional(ATTR_VAPID_EMAIL): cv.string,
})
ATTR_SUBSCRIPTION = 'subscription'
ATTR_BROWSER = 'browser'
ATTR_NAME = 'name'
ATTR_ENDPOINT = 'endpoint'
ATTR_KEYS = 'keys'
ATTR_AUTH = 'auth'
ATTR_P256DH = 'p256dh'
ATTR_EXPIRATIONTIME = 'expirationTime'
ATTR_TAG = 'tag'
ATTR_ACTION = 'action'
ATTR_ACTIONS = 'actions'
ATTR_TYPE = 'type'
ATTR_URL = 'url'
ATTR_DISMISS = 'dismiss'
ATTR_JWT = 'jwt'
WS_TYPE_APPKEY = 'notify/html5/appkey'
SCHEMA_WS_APPKEY = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_APPKEY
})
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict, vol.Schema({
vol.Required(ATTR_AUTH): cv.string,
vol.Required(ATTR_P256DH): cv.string,
})
)
SUBSCRIPTION_SCHEMA = vol.All(
dict, vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
})
)
DISMISS_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
})
REGISTER_SCHEMA = vol.Schema({
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(['chrome', 'firefox']),
vol.Optional(ATTR_NAME): cv.string
})
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema({
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(['received', 'clicked', 'closed']),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
})
NOTIFY_CALLBACK_EVENT = 'html5_notification'
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
'actions', 'badge', 'body', 'dir', 'icon', 'image', 'lang',
'renotify', 'requireInteraction', 'tag', 'timestamp', 'vibrate')
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
vapid_pub_key = config.get(ATTR_VAPID_PUB_KEY)
vapid_prv_key = config.get(ATTR_VAPID_PRV_KEY)
vapid_email = config.get(ATTR_VAPID_EMAIL)
def websocket_appkey(hass, connection, msg):
connection.send_message(
websocket_api.result_message(msg['id'], vapid_pub_key))
hass.components.websocket_api.async_register_command(
WS_TYPE_APPKEY, websocket_appkey, SCHEMA_WS_APPKEY
)
hass.http.register_view(
HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(
ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(
hass, gcm_api_key, vapid_prv_key, vapid_email, registrations,
json_path)
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = '/api/notify.html5'
name = 'api:notify.html5'
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(
humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
return self.json_message(
'Push notification subscriber registered.')
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or 'unnamed device',
self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message('Registration not found.')
reg = self.registrations.pop(found)
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
return self.json_message('Push notification subscriber unregistered.')
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = '/api/notify.html5/callback'
name = 'api:notify.html5/callback'
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
import jwt
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message('No target found in JWT',
status_code=HTTP_UNAUTHORIZED)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message('Authorization header is expected',
status_code=HTTP_UNAUTHORIZED)
parts = auth.split()
if parts[0].lower() != 'bearer':
return self.json_message('Authorization header must '
'start with Bearer',
status_code=HTTP_UNAUTHORIZED)
if len(parts) != 2:
return self.json_message('Authorization header must '
'be Bearer token',
status_code=HTTP_UNAUTHORIZED)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message('token is invalid',
status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning("Callback event payload is not valid: %s",
humanize_error(event_payload, ex))
event_name = '{}.{}'.format(NOTIFY_CALLBACK_EVENT,
event_payload[ATTR_TYPE])
request.app['hass'].bus.fire(event_name, event_payload)
return self.json({'status': 'ok', 'event': event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, hass, gcm_key, vapid_prv, vapid_email, registrations,
json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self._vapid_prv = vapid_prv
self._vapid_claims = {"sub": "mailto:{}".format(vapid_email)}
self.registrations = registrations
self.registrations_json_path = json_path
async def async_dismiss_message(service):
"""Handle dismissing notification message service calls."""
kwargs = {}
if self.targets is not None:
kwargs[ATTR_TARGET] = self.targets
elif service.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
await self.async_dismiss(**kwargs)
hass.services.async_register(
DOMAIN, SERVICE_DISMISS, async_dismiss_message,
schema=DISMISS_SERVICE_SCHEMA)
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def dismiss(self, **kwargs):
"""Dismisses a notification."""
data = kwargs.get(ATTR_DATA)
tag = data.get(ATTR_TAG) if data else ""
payload = {
ATTR_TAG: tag,
ATTR_DISMISS: True,
ATTR_DATA: {}
}
self._push_message(payload, **kwargs)
async def async_dismiss(self, **kwargs):
"""Dismisses a notification.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(
partial(self.dismiss, **kwargs))
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
tag = str(uuid.uuid4())
payload = {
'badge': '/static/images/notification-badge.png',
'body': message,
ATTR_DATA: {},
'icon': '/static/icons/favicon-192x192.png',
ATTR_TAG: tag,
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (payload[ATTR_DATA].get(ATTR_URL) is None and
payload.get(ATTR_ACTIONS) is None):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
self._push_message(payload, **kwargs)
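    # Illustrative example (not part of the original component) of the
    # split performed above; for a hypothetical service call with
    #
    #   data: {"tag": "alert-1", "url": "/lovelace", "my-key": "1"}
    #
    # "tag" goes directly into the notification payload (it is a
    # showNotification parameter), while "url" and "my-key" end up in
    # payload[ATTR_DATA] for the service worker to interpret.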
def _push_message(self, payload, **kwargs):
"""Send the message."""
import jwt
from pywebpush import WebPusher, webpush
timestamp = int(time.time())
payload['timestamp'] = (timestamp*1000) # Javascript ms since epoch
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
if info is None:
_LOGGER.error("%s is not a valid HTML5 push notification"
" target", target)
continue
jwt_exp = (datetime.datetime.fromtimestamp(timestamp) +
datetime.timedelta(days=JWT_VALID_DAYS))
jwt_secret = info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
jwt_claims = {'exp': jwt_exp, 'nbf': timestamp,
'iat': timestamp, ATTR_TARGET: target,
ATTR_TAG: payload[ATTR_TAG]}
jwt_token = jwt.encode(jwt_claims, jwt_secret).decode('utf-8')
payload[ATTR_DATA][ATTR_JWT] = jwt_token
if self._vapid_prv and self._vapid_claims:
response = webpush(
info[ATTR_SUBSCRIPTION],
json.dumps(payload),
vapid_private_key=self._vapid_prv,
vapid_claims=self._vapid_claims
)
else:
# Only pass the gcm key if we're actually using GCM
# If we don't, notifications break on FireFox
gcm_key = self._gcm_key \
if 'googleapis.com' \
in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT] \
else None
response = WebPusher(info[ATTR_SUBSCRIPTION]).send(
json.dumps(payload), gcm_key=gcm_key, ttl='86400'
)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path,
self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
| apache-2.0 |
kamalneet/airline-data-analysis | py/g1q2.py | 1 | 2021 | from __future__ import print_function
import os
import sys
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
csvFields = ["Year", "Month", "DayofMonth", "DayOfWeek", "UniqueCarrier", "Origin", "Dest", "CRSDepTime", "DepDelay", "ArrDelay", "Cancelled", "Diverted"]
def getFieldIndex(field):
i = 0
for f in csvFields:
if f == field:
return i
i+=1
return -1
def airlineMapper(csv):
# print(csv)
toks=csv[1].split(",")
if toks[0] == "Year":
return []
airline_idx = getFieldIndex("UniqueCarrier")
delay_idx = getFieldIndex("ArrDelay")
delay = toks[delay_idx]
return (toks[airline_idx], delay)
# state: (sum_of_delays, num_flights)
def countReducer(newvals, old_state):
# print("values: " + str(v1) + " " + str(v2))
old = old_state or (0, 0)
# print("old=" + str(old))
# print(newvals)
return (sum(newvals) + old[0], len(newvals) + old[1])
def computeAvg(kv):
return (kv[0], float(kv[1][0])/kv[1][1])
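# Worked example (illustrative, hypothetical numbers): if a batch carries
# delays [5, 10] for carrier "AA" and the previous state was (20, 3),
# countReducer([5, 10], (20, 3)) returns (5 + 10 + 20, 2 + 3) == (35, 5),
# and computeAvg(("AA", (35, 5))) then yields ("AA", 7.0).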
def createContext():
# If you do not see this printed, that means the StreamingContext has been loaded
# from the new checkpoint
print("Creating new context")
sc = SparkContext(appName="AirlineDataAnalysis")
ssc = StreamingContext(sc, 1)
csvStream = KafkaUtils.createDirectStream(ssc, ["airline"], {"metadata.broker.list": "172.31.81.70:9092", "auto.offset.reset": "smallest"})
# "_" is added by cleanup script for records where it is not available
airlineCounts = csvStream.map(airlineMapper).filter(lambda kvtup: kvtup[1] != "_").map(lambda kvtup: (kvtup[0], int(kvtup[1]))).updateStateByKey(countReducer).map(computeAvg)
airlineCounts.pprint()
airlineCounts.saveAsTextFiles("airline_delays")
return ssc
if __name__ == "__main__":
ssc = StreamingContext.getOrCreate("/home/centos/spark-checkpoint",
lambda: createContext())
ssc.start()
ssc.awaitTermination()
print("await done")
| apache-2.0 |
alfa-addon/addon | plugin.video.alfa/lib/sambatools/smb/smb_structs.py | 2 | 53405 |
import os, sys, struct, types, logging, binascii, time
from StringIO import StringIO
from smb_constants import *
# Set to True if you want to enable support for extended security. Required for Windows Vista and later
SUPPORT_EXTENDED_SECURITY = True
# Set to True if you want to enable SMB2 protocol.
SUPPORT_SMB2 = True
# Set to True if you want to enable SMB2.1 and above protocol.
SUPPORT_SMB2x = True
# Supported dialects
NT_LAN_MANAGER_DIALECT = 0 # 'NT LM 0.12' is always the first element in the dialect list and must always be included (MS-SMB 2.2.4.5.1)
# Return the list of support SMB dialects based on the SUPPORT_x constants
def init_dialects_list():
dialects = [ 'NT LM 0.12' ]
if SUPPORT_SMB2:
dialects.append('SMB 2.002')
if SUPPORT_SMB2x:
dialects.append('SMB 2.???')
return dialects
class UnsupportedFeature(Exception):
"""
Raised when an supported feature is present/required in the protocol but is not
currently supported by pysmb
"""
pass
class ProtocolError(Exception):
def __init__(self, message, data_buf = None, smb_message = None):
self.message = message
self.data_buf = data_buf
self.smb_message = smb_message
def __str__(self):
b = StringIO()
b.write(self.message + os.linesep)
if self.smb_message:
b.write('=' * 20 + ' SMB Message ' + '=' * 20 + os.linesep)
b.write(str(self.smb_message))
if self.data_buf:
b.write('=' * 20 + ' SMB Data Packet (hex) ' + '=' * 20 + os.linesep)
b.write(binascii.hexlify(self.data_buf))
b.write(os.linesep)
return b.getvalue()
class SMB2ProtocolHeaderError(ProtocolError):
def __init__(self):
ProtocolError.__init__(self, "Packet header belongs to SMB2")
class OperationFailure(Exception):
def __init__(self, message, smb_messages):
self.args = [ message ]
self.message = message
self.smb_messages = smb_messages
def __str__(self):
b = StringIO()
b.write(self.message + os.linesep)
for idx, m in enumerate(self.smb_messages):
b.write('=' * 20 + ' SMB Message %d ' % idx + '=' * 20 + os.linesep)
b.write('SMB Header:' + os.linesep)
b.write('-----------' + os.linesep)
b.write(str(m))
b.write('SMB Data Packet (hex):' + os.linesep)
b.write('----------------------' + os.linesep)
b.write(binascii.hexlify(m.raw_data))
b.write(os.linesep)
return b.getvalue()
class SMBError:
def __init__(self):
self.reset()
def reset(self):
self.internal_value = 0L
self.is_ntstatus = True
def __str__(self):
if self.is_ntstatus:
return 'NTSTATUS=0x%08X' % self.internal_value
else:
return 'ErrorClass=0x%02X ErrorCode=0x%04X' % ( self.internal_value >> 24, self.internal_value & 0xFFFF )
@property
def hasError(self):
return self.internal_value != 0
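# Example of SMBError rendering (hypothetical status value): with
# internal_value = 0xC0000022 and is_ntstatus True, str() gives
# 'NTSTATUS=0xC0000022'; with is_ntstatus False, the same value renders as
# 'ErrorClass=0xC0 ErrorCode=0x0022'.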
class SMBMessage:
HEADER_STRUCT_FORMAT = "<4sBIBHHQxxHHHHB"
HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT)
log = logging.getLogger('SMB.SMBMessage')
protocol = 1
def __init__(self, conn, payload = None):
self.reset()
self.conn = conn
if payload:
self.payload = payload
self.payload.initMessage(self)
def __str__(self):
b = StringIO()
b.write('Command: 0x%02X (%s) %s' % ( self.command, SMB_COMMAND_NAMES.get(self.command, '<unknown>'), os.linesep ))
b.write('Status: %s %s' % ( str(self.status), os.linesep ))
b.write('Flags: 0x%02X %s' % ( self.flags, os.linesep ))
b.write('Flags2: 0x%04X %s' % ( self.flags2, os.linesep ))
b.write('PID: %d %s' % ( self.pid, os.linesep ))
b.write('UID: %d %s' % ( self.uid, os.linesep ))
b.write('MID: %d %s' % ( self.mid, os.linesep ))
b.write('TID: %d %s' % ( self.tid, os.linesep ))
b.write('Security: 0x%016X %s' % ( self.security, os.linesep ))
b.write('Parameters: %d bytes %s%s %s' % ( len(self.parameters_data), os.linesep, binascii.hexlify(self.parameters_data), os.linesep ))
b.write('Data: %d bytes %s%s %s' % ( len(self.data), os.linesep, binascii.hexlify(self.data), os.linesep ))
return b.getvalue()
def reset(self):
self.raw_data = ''
self.command = 0
self.status = SMBError()
self.flags = 0
self.flags2 = 0
self.pid = 0
self.tid = 0
self.uid = 0
self.mid = 0
self.security = 0L
self.parameters_data = ''
self.data = ''
self.payload = None
@property
def isReply(self):
return bool(self.flags & SMB_FLAGS_REPLY)
@property
def hasExtendedSecurity(self):
return bool(self.flags2 & SMB_FLAGS2_EXTENDED_SECURITY)
def encode(self):
"""
Encode this SMB message into a series of bytes suitable to be embedded with a NetBIOS session message.
AssertionError will be raised if this SMB message has not been initialized with a Payload instance
@return: a string containing the encoded SMB message
"""
assert self.payload
self.pid = os.getpid()
self.payload.prepare(self)
parameters_len = len(self.parameters_data)
assert parameters_len % 2 == 0
headers_data = struct.pack(self.HEADER_STRUCT_FORMAT,
'\xFFSMB', self.command, self.status.internal_value, self.flags,
self.flags2, (self.pid >> 16) & 0xFFFF, self.security, self.tid,
self.pid & 0xFFFF, self.uid, self.mid, int(parameters_len / 2))
return headers_data + self.parameters_data + struct.pack('<H', len(self.data)) + self.data
def decode(self, buf):
"""
Decodes the SMB message in buf.
All fields of the SMBMessage object will be reset to default values before decoding.
On errors, do not assume that the fields will be reinstated back to what they are before
this method is invoked.
@param buf: data containing one complete SMB message
@type buf: string
@return: a positive integer indicating the number of bytes used in buf to decode this SMB message
@raise ProtocolError: raised when decoding fails
"""
buf_len = len(buf)
if buf_len < self.HEADER_STRUCT_SIZE:
# We need at least 32 bytes (header) + 1 byte (parameter count)
raise ProtocolError('Not enough data to decode SMB header', buf)
self.reset()
protocol, self.command, status, self.flags, \
self.flags2, pid_high, self.security, self.tid, \
pid_low, self.uid, self.mid, params_count = struct.unpack(self.HEADER_STRUCT_FORMAT, buf[:self.HEADER_STRUCT_SIZE])
if protocol == '\xFESMB':
raise SMB2ProtocolHeaderError()
if protocol != '\xFFSMB':
raise ProtocolError('Invalid 4-byte protocol field', buf)
self.pid = (pid_high << 16) | pid_low
self.status.internal_value = status
self.status.is_ntstatus = bool(self.flags2 & SMB_FLAGS2_NT_STATUS)
offset = self.HEADER_STRUCT_SIZE
if buf_len < params_count * 2 + 2:
# Not enough data in buf to decode up to body length
raise ProtocolError('Not enough data. Parameters list decoding failed', buf)
datalen_offset = offset + params_count*2
body_len = struct.unpack('<H', buf[datalen_offset:datalen_offset+2])[0]
if body_len > 0 and buf_len < (datalen_offset + 2 + body_len):
# Not enough data in buf to decode body
raise ProtocolError('Not enough data. Body decoding failed', buf)
self.parameters_data = buf[offset:datalen_offset]
if body_len > 0:
self.data = buf[datalen_offset+2:datalen_offset+2+body_len]
self.raw_data = buf
self._decodePayload()
return self.HEADER_STRUCT_SIZE + params_count * 2 + 2 + body_len
def _decodePayload(self):
if self.command == SMB_COM_READ_ANDX:
self.payload = ComReadAndxResponse()
elif self.command == SMB_COM_WRITE_ANDX:
self.payload = ComWriteAndxResponse()
elif self.command == SMB_COM_TRANSACTION:
self.payload = ComTransactionResponse()
elif self.command == SMB_COM_TRANSACTION2:
self.payload = ComTransaction2Response()
elif self.command == SMB_COM_OPEN_ANDX:
self.payload = ComOpenAndxResponse()
elif self.command == SMB_COM_NT_CREATE_ANDX:
self.payload = ComNTCreateAndxResponse()
elif self.command == SMB_COM_TREE_CONNECT_ANDX:
self.payload = ComTreeConnectAndxResponse()
elif self.command == SMB_COM_ECHO:
self.payload = ComEchoResponse()
elif self.command == SMB_COM_SESSION_SETUP_ANDX:
self.payload = ComSessionSetupAndxResponse()
elif self.command == SMB_COM_NEGOTIATE:
self.payload = ComNegotiateResponse()
if self.payload:
self.payload.decode(self)
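# A minimal decode sketch (hypothetical usage; assumes `raw` holds one
# complete SMB message off the wire and `conn` is the owning connection):
#
#   msg = SMBMessage(conn)
#   consumed = msg.decode(raw)  # raises ProtocolError on malformed input
#   print 'Command: 0x%02X reply: %s' % (msg.command, msg.isReply)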
class Payload:
DEFAULT_ANDX_PARAM_HEADER = '\xFF\x00\x00\x00'
DEFAULT_ANDX_PARAM_SIZE = 4
def initMessage(self, message):
# SMB_FLAGS2_UNICODE must always be enabled. Without this, almost all the Payload subclasses will need to be
# rewritten to check for OEM/Unicode strings which will be tedious. Fortunately, almost all tested CIFS services
# support SMB_FLAGS2_UNICODE by default.
assert message.payload == self
message.flags = SMB_FLAGS_CASE_INSENSITIVE | SMB_FLAGS_CANONICALIZED_PATHS
message.flags2 = SMB_FLAGS2_UNICODE | SMB_FLAGS2_NT_STATUS | SMB_FLAGS2_LONG_NAMES | SMB_FLAGS2_EAS
if SUPPORT_EXTENDED_SECURITY:
message.flags2 |= SMB_FLAGS2_EXTENDED_SECURITY | SMB_FLAGS2_SMB_SECURITY_SIGNATURE
def prepare(self, message):
raise NotImplementedError
def decode(self, message):
raise NotImplementedError
class ComNegotiateRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.52.1
- [MS-SMB]: 2.2.4.5.1
"""
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_NEGOTIATE
def prepare(self, message):
assert message.payload == self
message.parameters_data = ''
message.data = ''.join(map(lambda s: '\x02'+s+'\x00', init_dialects_list()))
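# With the default dialect list above, the resulting data block is
# '\x02NT LM 0.12\x00\x02SMB 2.002\x00\x02SMB 2.???\x00' (each dialect
# prefixed with a 0x02 buffer-format byte and NUL-terminated).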
class ComNegotiateResponse(Payload):
"""
Contains information on the SMB_COM_NEGOTIATE response from server
After calling the decode method, each instance will contain the following attributes,
- security_mode (integer)
- max_mpx_count (integer)
- max_number_vcs (integer)
- max_buffer_size (long)
- max_raw_size (long)
- session_key (long)
- capabilities (long)
- system_time (long)
- server_time_zone (integer)
- challenge_length (integer)
    If the underlying SMB message's flags2 does not have SMB_FLAGS2_EXTENDED_SECURITY bit enabled,
then the instance will have the following additional attributes,
- challenge (string)
- domain (unicode)
If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled,
then the instance will have the following additional attributes,
- server_guid (string)
- security_blob (string)
References:
===========
- [MS-SMB]: 2.2.4.5.2.1
- [MS-CIFS]: 2.2.4.52.2
"""
PAYLOAD_STRUCT_FORMAT = '<HBHHIIIIQHB'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_NEGOTIATE
if not message.isReply:
raise ProtocolError('Not a SMB_COM_NEGOTIATE reply', message.raw_data, message)
self.security_mode, self.max_mpx_count, self.max_number_vcs, self.max_buffer_size, \
self.max_raw_size, self.session_key, self.capabilities, self.system_time, self.server_time_zone, \
self.challenge_length = ( 0, ) * 10
data_len = len(message.parameters_data)
if data_len < 2:
raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE dialect_index field', message.raw_data, message)
self.dialect_index = struct.unpack('<H', message.parameters_data[:2])[0]
if self.dialect_index == NT_LAN_MANAGER_DIALECT:
if data_len != (0x11 * 2):
raise ProtocolError('NT LAN Manager dialect selected in SMB_COM_NEGOTIATE but parameters bytes count (%d) does not meet specs' % data_len,
message.raw_data, message)
else:
_, self.security_mode, self.max_mpx_count, self.max_number_vcs, self.max_buffer_size, \
self.max_raw_size, self.session_key, self.capabilities, self.system_time, self.server_time_zone, \
self.challenge_length = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
elif self.dialect_index == 0xFFFF:
raise ProtocolError('Server does not support any of the pysmb dialects. Please email pysmb to add in support for your OS',
message.raw_data, message)
else:
raise ProtocolError('Unknown dialect index (0x%04X)' % self.dialect_index, message.raw_data, message)
data_len = len(message.data)
if not message.hasExtendedSecurity:
self.challenge, self.domain = '', ''
if self.challenge_length > 0:
if data_len >= self.challenge_length:
self.challenge = message.data[:self.challenge_length]
s = ''
offset = self.challenge_length
while offset < data_len:
_s = message.data[offset:offset+2]
if _s == '\0\0':
self.domain = s.decode('UTF-16LE')
break
else:
s += _s
offset += 2
else:
raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE (without security extensions) Challenge field', message.raw_data, message)
else:
if data_len < 16:
raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE (with security extensions) ServerGUID field', message.raw_data, message)
self.server_guid = message.data[:16]
self.security_blob = message.data[16:]
@property
def supportsExtendedSecurity(self):
return bool(self.capabilities & CAP_EXTENDED_SECURITY)
class ComSessionSetupAndxRequest__WithSecurityExtension(Payload):
"""
References:
===========
- [MS-SMB]: 2.2.4.6.1
"""
PAYLOAD_STRUCT_FORMAT = '<HHHIHII'
def __init__(self, session_key, security_blob):
self.session_key = session_key
self.security_blob = security_blob
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_SESSION_SETUP_ANDX
def prepare(self, message):
assert message.hasExtendedSecurity
message.flags2 |= SMB_FLAGS2_UNICODE
cap = CAP_UNICODE | CAP_STATUS32 | CAP_EXTENDED_SECURITY | CAP_NT_SMBS
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
16644, 10, 1, self.session_key, len(self.security_blob), 0, cap)
message.data = self.security_blob
if (SMBMessage.HEADER_STRUCT_SIZE + len(message.parameters_data) + len(message.data)) % 2 != 0:
message.data = message.data + '\0'
message.data = message.data + '\0' * 4
class ComSessionSetupAndxRequest__NoSecurityExtension(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.53.1
"""
PAYLOAD_STRUCT_FORMAT = '<HHHIHHII'
def __init__(self, session_key, username, password, is_unicode, domain):
self.username = username
self.session_key = session_key
self.password = password
self.is_unicode = is_unicode
self.domain = domain
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_SESSION_SETUP_ANDX
def prepare(self, message):
if self.is_unicode:
message.flags2 |= SMB_FLAGS2_UNICODE
else:
message.flags2 &= (~SMB_FLAGS2_UNICODE & 0xFFFF)
password_len = len(self.password)
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
16644, 10, 0, self.session_key,
(not self.is_unicode and password_len) or 0,
(self.is_unicode and password_len) or 0,
0,
CAP_UNICODE | CAP_LARGE_FILES | CAP_STATUS32)
        est_offset = SMBMessage.HEADER_STRUCT_SIZE + len(message.parameters_data)  # Used to check whether data following the SMB parameters is aligned to a 16-bit boundary
message.data = self.password
if (est_offset + len(message.data)) % 2 != 0 and message.flags2 & SMB_FLAGS2_UNICODE:
message.data = message.data + '\0'
if message.flags2 & SMB_FLAGS2_UNICODE:
message.data = message.data + self.username.encode('UTF-16LE') + '\0'
else:
message.data = message.data + str(self.username) + '\0'
if (est_offset + len(message.data)) % 2 != 0 and message.flags2 & SMB_FLAGS2_UNICODE:
message.data = message.data + '\0'
if message.flags2 & SMB_FLAGS2_UNICODE:
message.data = message.data + self.domain.encode('UTF-16LE') + '\0\0' + 'pysmb'.encode('UTF-16LE') + '\0\0'
else:
message.data = message.data + self.domain + '\0pysmb\0'
class ComSessionSetupAndxResponse(Payload):
"""
Contains information on the SMB_COM_SESSION_SETUP_ANDX response from server
If the underlying SMB message's flags2 does not have SMB_FLAGS2_EXTENDED_SECURITY bit enabled,
then the instance will have the following attributes,
- action
If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled
and the message status is STATUS_MORE_PROCESSING_REQUIRED or equals to 0x00 (no error),
then the instance will have the following attributes,
- action
- securityblob
    If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled but
    the message status is an error other than STATUS_MORE_PROCESSING_REQUIRED, neither of
    these attributes will be set.
References:
===========
- [MS-SMB]: 2.2.4.6.2
- [MS-CIFS]: 2.2.4.53.2
"""
NOSECURE_PARAMETER_STRUCT_FORMAT = '<BBHH'
NOSECURE_PARAMETER_STRUCT_SIZE = struct.calcsize(NOSECURE_PARAMETER_STRUCT_FORMAT)
SECURE_PARAMETER_STRUCT_FORMAT = '<BBHHH'
SECURE_PARAMETER_STRUCT_SIZE = struct.calcsize(SECURE_PARAMETER_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_SESSION_SETUP_ANDX
if not message.hasExtendedSecurity:
if not message.status.hasError:
if len(message.parameters_data) < self.NOSECURE_PARAMETER_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (no security extensions) parameters', message.raw_data, message)
_, _, _, self.action = struct.unpack(self.NOSECURE_PARAMETER_STRUCT_FORMAT, message.parameters_data[:self.NOSECURE_PARAMETER_STRUCT_SIZE])
else:
if not message.status.hasError or message.status.internal_value == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED
if len(message.parameters_data) < self.SECURE_PARAMETER_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (with security extensions) parameters', message.raw_data, message)
_, _, _, self.action, blob_length = struct.unpack(self.SECURE_PARAMETER_STRUCT_FORMAT, message.parameters_data[:self.SECURE_PARAMETER_STRUCT_SIZE])
if len(message.data) < blob_length:
raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (with security extensions) security blob', message.raw_data, message)
self.security_blob = message.data[:blob_length]
class ComTreeConnectAndxRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.55.1
- [MS-SMB]: 2.2.4.7.1
"""
PAYLOAD_STRUCT_FORMAT = '<HH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, path, service, password = ''):
self.path = path
self.service = service
self.password = password + '\0'
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_TREE_CONNECT_ANDX
def prepare(self, message):
password_len = len(self.password)
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
0x08 | \
((message.hasExtendedSecurity and 0x0004) or 0x00) | \
                        ((message.tid and message.tid != 0xFFFF and 0x0001) or 0x00),  # Set the DISCONNECT_TID flag (0x0001) to detach any tid already bound to this connection
password_len)
padding = ''
if password_len % 2 == 0:
padding = '\0'
# Note that service field is never encoded in UTF-16LE. [MS-CIFS]: 2.2.1.1
message.data = self.password + padding + self.path.encode('UTF-16LE') + '\0\0' + self.service + '\0'
class ComTreeConnectAndxResponse(Payload):
"""
Contains information about the SMB_COM_TREE_CONNECT_ANDX response from the server.
If the message has no errors, each instance contains the following attributes:
- optional_support
References:
===========
- [MS-CIFS]: 2.2.4.55.2
"""
PAYLOAD_STRUCT_FORMAT = '<BBHH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_TREE_CONNECT_ANDX
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_TREE_CONNECT_ANDX parameters', message.raw_data, message)
_, _, _, self.optional_support = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
class ComNTCreateAndxRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.64.1
- [MS-SMB]: 2.2.4.9.1
"""
PAYLOAD_STRUCT_FORMAT = '<BHIIIQIIIIIB'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, filename, flags = 0, root_fid = 0, access_mask = 0, allocation_size = 0L, ext_attr = 0,
share_access = 0, create_disp = 0, create_options = 0, impersonation = 0, security_flags = 0):
self.filename = (filename + '\0').encode('UTF-16LE')
self.flags = flags
self.root_fid = root_fid
self.access_mask = access_mask
self.allocation_size = allocation_size
self.ext_attr = ext_attr
self.share_access = share_access
self.create_disp = create_disp
self.create_options = create_options
self.impersonation = impersonation
self.security_flags = security_flags
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_NT_CREATE_ANDX
def prepare(self, message):
filename_len = len(self.filename)
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
0x00, # reserved
filename_len, # NameLength
self.flags, # Flags
self.root_fid, # RootDirectoryFID
self.access_mask, # DesiredAccess
self.allocation_size, # AllocationSize
self.ext_attr, # ExtFileAttributes
self.share_access, # ShareAccess
self.create_disp, # CreateDisposition
self.create_options, # CreateOptions
self.impersonation, # ImpersonationLevel
self.security_flags) # SecurityFlags
padding = ''
if (message.HEADER_STRUCT_SIZE + len(message.parameters_data)) % 2 != 0:
padding = '\0'
message.data = padding + self.filename
class ComNTCreateAndxResponse(Payload):
"""
Contains (partial) information about the SMB_COM_NT_CREATE_ANDX response from the server.
Each instance contains the following attributes after decoding:
- oplock_level
- fid
References:
===========
- [MS-CIFS]: 2.2.4.64.2
"""
PAYLOAD_STRUCT_FORMAT = '<BBHBH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_NT_CREATE_ANDX
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_NT_CREATE_ANDX parameters', message.raw_data, message)
_, _, _, self.oplock_level, self.fid = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
class ComTransactionRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.33.1
"""
PAYLOAD_STRUCT_FORMAT = '<HHHHBBHIHHHHHH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, max_params_count, max_data_count, max_setup_count,
total_params_count = 0, total_data_count = 0,
params_bytes = '', data_bytes = '', setup_bytes = '',
flags = 0, timeout = 0, name = "\\PIPE\\"):
self.total_params_count = total_params_count or len(params_bytes)
self.total_data_count = total_data_count or len(data_bytes)
self.max_params_count = max_params_count
self.max_data_count = max_data_count
self.max_setup_count = max_setup_count
self.flags = flags
self.timeout = timeout
self.params_bytes = params_bytes
self.data_bytes = data_bytes
self.setup_bytes = setup_bytes
self.name = name
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_TRANSACTION
def prepare(self, message):
name = (self.name + '\0').encode('UTF-16LE')
name_len = len(name)
setup_bytes_len = len(self.setup_bytes)
params_bytes_len = len(self.params_bytes)
data_bytes_len = len(self.data_bytes)
padding0 = ''
offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
if offset % 2 != 0:
padding0 = '\0'
offset += 1
offset += name_len # For the name field
padding1 = ''
if offset % 4 != 0:
padding1 = '\0'*(4-offset%4)
offset += (4-offset%4)
if params_bytes_len > 0:
params_bytes_offset = offset
offset += params_bytes_len
else:
params_bytes_offset = 0
padding2 = ''
if offset % 4 != 0:
padding2 = '\0'*(4-offset%4)
offset += (4-offset%4)
if data_bytes_len > 0:
data_bytes_offset = offset
else:
data_bytes_offset = 0
message.parameters_data = \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
self.total_params_count,
self.total_data_count,
self.max_params_count,
self.max_data_count,
self.max_setup_count,
0x00, # Reserved1. Must be 0x00
self.flags,
self.timeout,
0x0000, # Reserved2. Must be 0x0000
params_bytes_len,
params_bytes_offset,
data_bytes_len,
data_bytes_offset,
int(setup_bytes_len / 2)) + \
self.setup_bytes
message.data = padding0 + name + padding1 + self.params_bytes + padding2 + self.data_bytes
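# Worked offset example for prepare() above (hypothetical sizes): with
# HEADER_STRUCT_SIZE = 33 (32-byte header plus the word-count byte),
# PAYLOAD_STRUCT_SIZE = 28 and 4 setup bytes, the initial offset is
# 33 + 28 + 4 + 2 = 67, so padding0 is a single NUL; padding1 and padding2
# then round the parameter and data offsets up to 4-byte boundaries.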
class ComTransactionResponse(Payload):
"""
Contains information about a SMB_COM_TRANSACTION response from the server
After decoding, each instance contains the following attributes:
- total_params_count (integer)
- total_data_count (integer)
- setup_bytes (string)
- data_bytes (string)
- params_bytes (string)
References:
===========
- [MS-CIFS]: 2.2.4.33.2
"""
PAYLOAD_STRUCT_FORMAT = '<HHHHHHHHHH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_TRANSACTION
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message)
self.total_params_count, self.total_data_count, _, \
params_bytes_len, params_bytes_offset, params_bytes_displ, \
data_bytes_len, data_bytes_offset, data_bytes_displ, \
setup_count = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
if setup_count > 0:
setup_bytes_len = setup_count * 2
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE + setup_bytes_len:
raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message)
self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_bytes_len]
else:
self.setup_bytes = ''
offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count * 2 + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
if params_bytes_len > 0:
self.params_bytes = message.data[params_bytes_offset-offset:params_bytes_offset-offset+params_bytes_len]
else:
self.params_bytes = ''
if data_bytes_len > 0:
self.data_bytes = message.data[data_bytes_offset-offset:data_bytes_offset-offset+data_bytes_len]
else:
self.data_bytes = ''
class ComTransaction2Request(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.46.1
"""
    PAYLOAD_STRUCT_FORMAT = '<HHHHBBHIHHHHHH'  # little-endian, matching every other payload format in this module
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, max_params_count, max_data_count, max_setup_count,
total_params_count = 0, total_data_count = 0,
params_bytes = '', data_bytes = '', setup_bytes = '',
flags = 0, timeout = 0):
self.total_params_count = total_params_count or len(params_bytes)
self.total_data_count = total_data_count or len(data_bytes)
self.max_params_count = max_params_count
self.max_data_count = max_data_count
self.max_setup_count = max_setup_count
self.flags = flags
self.timeout = timeout
self.params_bytes = params_bytes
self.data_bytes = data_bytes
self.setup_bytes = setup_bytes
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_TRANSACTION2
def prepare(self, message):
setup_bytes_len = len(self.setup_bytes)
params_bytes_len = len(self.params_bytes)
data_bytes_len = len(self.data_bytes)
name = '\0\0'
padding0 = ''
offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
if offset % 2 != 0:
padding0 = '\0'
offset += 1
offset += 2 # For the name field
padding1 = ''
if offset % 4 != 0:
padding1 = '\0'*(4-offset%4)
if params_bytes_len > 0:
params_bytes_offset = offset
offset += params_bytes_len
else:
params_bytes_offset = 0
padding2 = ''
if offset % 4 != 0:
padding2 = '\0'*(4-offset%4)
if data_bytes_len > 0:
data_bytes_offset = offset
else:
data_bytes_offset = 0
message.parameters_data = \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
self.total_params_count,
self.total_data_count,
self.max_params_count,
self.max_data_count,
self.max_setup_count,
0x00, # Reserved1. Must be 0x00
self.flags,
self.timeout,
0x0000, # Reserved2. Must be 0x0000
params_bytes_len,
params_bytes_offset,
data_bytes_len,
data_bytes_offset,
int(setup_bytes_len / 2)) + \
self.setup_bytes
message.data = padding0 + name + padding1 + self.params_bytes + padding2 + self.data_bytes
class ComTransaction2Response(Payload):
"""
Contains information about a SMB_COM_TRANSACTION2 response from the server
After decoding, each instance contains the following attributes:
- total_params_count (integer)
- total_data_count (integer)
- setup_bytes (string)
- data_bytes (string)
- params_bytes (string)
References:
===========
- [MS-CIFS]: 2.2.4.46.2
"""
PAYLOAD_STRUCT_FORMAT = '<HHHHHHHHHBB'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_TRANSACTION2
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION2 parameters', message.raw_data, message)
self.total_params_count, self.total_data_count, _, \
params_bytes_len, params_bytes_offset, params_bytes_displ, \
data_bytes_len, data_bytes_offset, data_bytes_displ, \
setup_count, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
if setup_count > 0:
setup_bytes_len = setup_count * 2
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE + setup_bytes_len:
                    raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION2 parameters', message.raw_data, message)
self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_bytes_len]
else:
self.setup_bytes = ''
offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count * 2 + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
if params_bytes_len > 0:
self.params_bytes = message.data[params_bytes_offset-offset:params_bytes_offset-offset+params_bytes_len]
else:
self.params_bytes = ''
if data_bytes_len > 0:
self.data_bytes = message.data[data_bytes_offset-offset:data_bytes_offset-offset+data_bytes_len]
else:
self.data_bytes = ''
class ComCloseRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.5.1
"""
PAYLOAD_STRUCT_FORMAT = '<HI'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, fid, last_modified_time = 0xFFFFFFFF):
self.fid = fid
self.last_modified_time = last_modified_time
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_CLOSE
def prepare(self, message):
message.parameters_data = struct.pack(self.PAYLOAD_STRUCT_FORMAT, self.fid, self.last_modified_time)
message.data = ''
class ComOpenAndxRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.41.1
"""
PAYLOAD_STRUCT_FORMAT = '<HHHHIHIII'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, filename, access_mode, open_mode, flags = 0x0000, search_attributes = 0, file_attributes = 0, create_time = 0, timeout = 0):
"""
@param create_time: Epoch time value to indicate the time of creation for this file. If zero, we will automatically assign the current time
@type create_time: int
@param timeout: Number of milliseconds to wait for blocked open request before failing
@type timeout: int
"""
self.filename = filename
self.access_mode = access_mode
self.open_mode = open_mode
self.flags = flags
self.search_attributes = search_attributes
self.file_attributes = file_attributes
self.create_time = create_time or int(time.time())
self.timeout = timeout
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_OPEN_ANDX
def prepare(self, message):
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
self.flags,
self.access_mode,
self.search_attributes,
self.file_attributes,
self.create_time,
self.open_mode,
0, # AllocationSize
0, # Timeout (in milli-secs)
0) # Reserved
message.data = '\0' + self.filename.encode('UTF-16LE') + '\0\0'
class ComOpenAndxResponse(Payload):
"""
Contains information about a SMB_COM_OPEN_ANDX response from the server
After decoding, each instance will contain the following attributes:
- fid (integer)
- file_attributes (integer)
- last_write_time (long)
- access_rights (integer)
- resource_type (integer)
- open_results (integer)
References:
===========
- [MS-CIFS]: 2.2.4.41.2
- [MS-SMB]: 2.2.4.1.2
"""
PAYLOAD_STRUCT_FORMAT = '<BBHHHIIHHHHHHH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_OPEN_ANDX
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_OPEN_ANDX parameters', message.raw_data, message)
_, _, _, self.fid, self.file_attributes, self.last_write_time, _, \
self.access_rights, self.resource_type, _, self.open_results, _, _, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT,
message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
class ComWriteAndxRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.43.1
- [MS-SMB]: 2.2.4.3.1
"""
PAYLOAD_STRUCT_FORMAT = '<HIIHHHHHI'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, fid, data_bytes, offset, write_mode = 0, timeout = 0):
"""
@param timeout: Number of milliseconds to wait for blocked write request before failing. Must be zero for writing to regular file
@type timeout: int
"""
self.fid = fid
self.offset = offset
self.data_bytes = data_bytes
self.timeout = timeout
self.write_mode = write_mode
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_WRITE_ANDX
def prepare(self, message):
# constant 1 is to account for the pad byte in the message.data
# constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
data_offset = message.HEADER_STRUCT_SIZE + self.DEFAULT_ANDX_PARAM_SIZE + self.PAYLOAD_STRUCT_SIZE + 1 + 2
data_len = len(self.data_bytes)
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
self.fid,
self.offset & 0xFFFFFFFF,
self.timeout,
self.write_mode,
data_len, # Remaining
0x0000, # Reserved
len(self.data_bytes), # DataLength
data_offset, # DataOffset
self.offset >> 32) # OffsetHigh field defined in [MS-SMB]: 2.2.4.3.1
message.data = '\0' + self.data_bytes
class ComWriteAndxResponse(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.43.2
- [MS-SMB]: 2.2.4.3.2
"""
    PAYLOAD_STRUCT_FORMAT = '<BBHHHHH'  # We follow the SMB_COM_WRITE_ANDX server extensions in [MS-SMB]: 2.2.4.3.2
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_WRITE_ANDX
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_WRITE_ANDX parameters', message.raw_data, message)
_, _, _, count, self.available, high_count, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
self.count = (count & 0xFFFF) | (high_count << 16)
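# Example (hypothetical values): count = 0x2000 with high_count = 0x0001
# reconstructs a 32-bit written-byte count of 0x00012000 (73728 bytes).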
class ComReadAndxRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.42.1
- [MS-SMB]: 2.2.4.2.1
"""
PAYLOAD_STRUCT_FORMAT = '<HIHHIHI'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, fid, offset, max_return_bytes_count, min_return_bytes_count, timeout = 0, remaining = 0):
"""
@param timeout: If reading from a regular file, this parameter must be 0.
@type timeout: int
"""
self.fid = fid
self.remaining = remaining
self.max_return_bytes_count = max_return_bytes_count
self.min_return_bytes_count = min_return_bytes_count
self.offset = offset
self.timeout = timeout
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_READ_ANDX
def prepare(self, message):
message.parameters_data = \
self.DEFAULT_ANDX_PARAM_HEADER + \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
self.fid,
self.offset & 0xFFFFFFFF,
self.max_return_bytes_count,
self.min_return_bytes_count,
self.timeout or (self.max_return_bytes_count >> 32), # Note that in [MS-SMB]: 2.2.4.2.1, this field can also act as MaxCountHigh field
self.remaining, # In [MS-CIFS]: 2.2.4.42.1, this field must be set to 0x0000
self.offset >> 32)
message.data = ''
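# Example of the 64-bit offset split in prepare() above (hypothetical value):
# offset 0x123456789 is sent as OffsetLow 0x23456789 and OffsetHigh 0x1.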
class ComReadAndxResponse(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.42.2
- [MS-SMB]: 2.2.4.2.2
"""
PAYLOAD_STRUCT_FORMAT = '<BBHHHHHHHHHHH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_READ_ANDX
if not message.status.hasError:
if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE:
raise ProtocolError('Not enough data to decode SMB_COM_READ_ANDX parameters', message.raw_data, message)
_, _, _, _, _, _, self.data_length, data_offset, _, _, _, _, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT,
message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
offset = data_offset - message.HEADER_STRUCT_SIZE - self.PAYLOAD_STRUCT_SIZE - 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
self.data = message.data[offset:offset+self.data_length]
assert len(self.data) == self.data_length
class ComDeleteRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.7.1
"""
def __init__(self, filename_pattern, search_attributes = 0):
self.filename_pattern = filename_pattern
self.search_attributes = search_attributes
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_DELETE
def prepare(self, message):
message.parameters_data = struct.pack('<H', self.search_attributes)
message.data = '\x04' + self.filename_pattern.encode('UTF-16LE') + '\0\0'
class ComCreateDirectoryRequest(Payload):
"""
Although this command has been marked deprecated in [MS-CIFS], we continue to use it for its simplicity
as compared to its replacement TRANS2_CREATE_DIRECTORY sub-command [MS-CIFS]: 2.2.6.14
References:
===========
- [MS-CIFS]: 2.2.4.1.1
"""
def __init__(self, path):
self.path = path
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_CREATE_DIRECTORY
def prepare(self, message):
message.parameters_data = ''
message.data = '\x04' + self.path.encode('UTF-16LE') + '\0\0'
class ComDeleteDirectoryRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.2.1
"""
def __init__(self, path):
self.path = path
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_DELETE_DIRECTORY
def prepare(self, message):
message.parameters_data = ''
message.data = '\x04' + self.path.encode('UTF-16LE') + '\0\0'
class ComRenameRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.8.1
"""
def __init__(self, old_path, new_path, search_attributes = 0):
self.old_path = old_path
self.new_path = new_path
self.search_attributes = search_attributes
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_RENAME
def prepare(self, message):
message.parameters_data = struct.pack('<H', self.search_attributes)
message.data = '\x04' + self.old_path.encode('UTF-16LE') + '\x00\x00\x04\x00' + self.new_path.encode('UTF-16LE') + '\x00\x00'
class ComEchoRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.39.1
"""
def __init__(self, echo_data = b'', echo_count = 1):
self.echo_count = echo_count
self.echo_data = echo_data
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_ECHO
message.tid = 0xFFFF
def prepare(self, message):
message.parameters_data = struct.pack('<H', self.echo_count)
message.data = self.echo_data
class ComEchoResponse(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.39.2
"""
def decode(self, message):
self.sequence_number = struct.unpack('<H', message.parameters_data[:2])[0]
self.data = message.data
class ComNTTransactRequest(Payload):
"""
References:
===========
- [MS-CIFS]: 2.2.4.62.1
"""
PAYLOAD_STRUCT_FORMAT = '<BHIIIIIIIIBH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def __init__(self, function, max_params_count, max_data_count, max_setup_count,
total_params_count = 0, total_data_count = 0,
params_bytes = '', setup_bytes = '', data_bytes = ''):
self.function = function
self.total_params_count = total_params_count or len(params_bytes)
self.total_data_count = total_data_count or len(data_bytes)
self.max_params_count = max_params_count
self.max_data_count = max_data_count
self.max_setup_count = max_setup_count
self.params_bytes = params_bytes
self.setup_bytes = setup_bytes
self.data_bytes = data_bytes
def initMessage(self, message):
Payload.initMessage(self, message)
message.command = SMB_COM_NT_TRANSACT
def prepare(self, message):
setup_bytes_len = len(self.setup_bytes)
params_bytes_len = len(self.params_bytes)
data_bytes_len = len(self.data_bytes)
padding0 = ''
offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters)
if offset % 4 != 0:
padding0 = '\0'*(4-offset%4)
offset += (4-offset%4)
if params_bytes_len > 0:
params_bytes_offset = offset
else:
params_bytes_offset = 0
offset += params_bytes_len
padding1 = ''
if offset % 4 != 0:
padding1 = '\0'*(4-offset%4)
offset += (4-offset%4)
if data_bytes_len > 0:
data_bytes_offset = offset
else:
data_bytes_offset = 0
message.parameters_data = \
struct.pack(self.PAYLOAD_STRUCT_FORMAT,
self.max_setup_count,
0x00, # Reserved1. Must be 0x00
self.total_params_count,
self.total_data_count,
self.max_params_count,
self.max_data_count,
params_bytes_len,
params_bytes_offset,
data_bytes_len,
data_bytes_offset,
int(setup_bytes_len / 2),
self.function) + \
self.setup_bytes
message.data = padding0 + self.params_bytes + padding1 + self.data_bytes
class ComNTTransactResponse(Payload):
"""
Contains information about a SMB_COM_NT_TRANSACT response from the server
After decoding, each instance contains the following attributes:
- total_params_count (integer)
- total_data_count (integer)
- setup_bytes (string)
- data_bytes (string)
- params_bytes (string)
References:
===========
- [MS-CIFS]: 2.2.4.62.2
"""
PAYLOAD_STRUCT_FORMAT = '<3sIIIIIIIIBH'
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT)
def decode(self, message):
assert message.command == SMB_COM_NT_TRANSACT
if not message.status.hasError:
_, self.total_params_count, self.total_data_count, \
params_count, params_offset, params_displ, \
data_count, data_offset, data_displ, setup_count = struct.unpack(self.PAYLOAD_STRUCT_FORMAT,
message.parameters_data[:self.PAYLOAD_STRUCT_SIZE])
self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_count*2]
if params_count > 0:
params_offset -= message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count*2 + 2
self.params_bytes = message.data[params_offset:params_offset+params_count]
else:
self.params_bytes = ''
if data_count > 0:
data_offset -= message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count*2 + 2
self.data_bytes = message.data[data_offset:data_offset+data_count]
else:
self.data_bytes = ''
| gpl-3.0 |
WholeGrainGoats/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py | 638 | 9836 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""PythonHeaderParserHandler for mod_pywebsocket.
Apache HTTP Server and mod_python must be configured such that this
function is called to handle WebSocket requests.
"""
import logging
from mod_python import apache
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import util
# PythonOption to specify the handler root directory.
_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
# PythonOption to specify the handler scan directory.
# This must be a directory under the root directory.
# The default is the root directory.
_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
# PythonOption to allow handlers whose canonical path is
# not under the root directory. It's disallowed by default.
# Set this option with value of 'yes' to allow.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
'mod_pywebsocket.allow_handlers_outside_root_dir')
# Map from values to their meanings. 'Yes' and 'No' are allowed just for
# compatibility.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
'off': False, 'no': False, 'on': True, 'yes': True}
# (Obsolete option. Ignored.)
# PythonOption to specify to allow handshake defined in Hixie 75 version
# protocol. The default is None (Off)
_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
# Map from values to their meanings.
_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
class ApacheLogHandler(logging.Handler):
"""Wrapper logging.Handler to emit log message to apache's error.log."""
_LEVELS = {
logging.DEBUG: apache.APLOG_DEBUG,
logging.INFO: apache.APLOG_INFO,
logging.WARNING: apache.APLOG_WARNING,
logging.ERROR: apache.APLOG_ERR,
logging.CRITICAL: apache.APLOG_CRIT,
}
def __init__(self, request=None):
logging.Handler.__init__(self)
self._log_error = apache.log_error
if request is not None:
self._log_error = request.log_error
# Time and level will be printed by Apache.
self._formatter = logging.Formatter('%(name)s: %(message)s')
def emit(self, record):
apache_level = apache.APLOG_DEBUG
if record.levelno in ApacheLogHandler._LEVELS:
apache_level = ApacheLogHandler._LEVELS[record.levelno]
msg = self._formatter.format(record)
# "server" parameter must be passed to have "level" parameter work.
# If only "level" parameter is passed, nothing shows up on Apache's
# log. However, at this point, we cannot get the server object of the
# virtual host which will process WebSocket requests. The only server
        # object we can get here is apache.main_server. But wherever (server
        # configuration context or virtual host context) we put the
        # PythonHeaderParserHandler directive, apache.main_server just points
        # to the main server instance (not to any virtual server instance). Then,
# Apache follows LogLevel directive in the server configuration context
# to filter logs. So, we need to specify LogLevel in the server
# configuration context. Even if we specify "LogLevel debug" in the
# virtual host context which actually handles WebSocket connections,
# DEBUG level logs never show up unless "LogLevel debug" is specified
# in the server configuration context.
#
# TODO(tyoshino): Provide logging methods on request object. When
# request is mp_request object (when used together with Apache), the
# methods call request.log_error indirectly. When request is
# _StandaloneRequest, the methods call Python's logging facility which
# we create in standalone.py.
self._log_error(msg, apache_level, apache.main_server)
def _configure_logging():
logger = logging.getLogger()
# Logs are filtered by Apache based on LogLevel directive in Apache
# configuration file. We must just pass logs for all levels to
# ApacheLogHandler.
logger.setLevel(logging.DEBUG)
logger.addHandler(ApacheLogHandler())
_configure_logging()
_LOGGER = logging.getLogger(__name__)
def _parse_option(name, value, definition):
if value is None:
return False
meaning = definition.get(value.lower())
if meaning is None:
raise Exception('Invalid value for PythonOption %s: %r' %
(name, value))
return meaning
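# Example: _parse_option('opt', 'On', {'off': False, 'on': True}) returns
# True, a value of None returns False, and an unrecognized value raises
# Exception.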
def _create_dispatcher():
_LOGGER.info('Initializing Dispatcher')
options = apache.main_server.get_options()
handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
if not handler_root:
raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
apache.APLOG_ERR)
handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
allow_handlers_outside_root = _parse_option(
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
dispatcher = dispatch.Dispatcher(
handler_root, handler_scan, allow_handlers_outside_root)
for warning in dispatcher.source_warnings():
apache.log_error(
'mod_pywebsocket: Warning in source loading: %s' % warning,
apache.APLOG_WARNING)
return dispatcher
# Initialize
_dispatcher = _create_dispatcher()
def headerparserhandler(request):
"""Handle request.
Args:
request: mod_python request.
This function is named headerparserhandler because it is the default
name for a PythonHeaderParserHandler.
"""
handshake_is_done = False
try:
# Fallback to default http handler for request paths for which
# we don't have request handlers.
if not _dispatcher.get_handler_suite(request.uri):
request.log_error(
'mod_pywebsocket: No handler for resource: %r' % request.uri,
apache.APLOG_INFO)
request.log_error(
'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
return apache.DECLINED
except dispatch.DispatchException, e:
request.log_error(
'mod_pywebsocket: Dispatch failed for error: %s' % e,
apache.APLOG_INFO)
if not handshake_is_done:
return e.status
try:
allow_draft75 = _parse_option(
_PYOPT_ALLOW_DRAFT75,
apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
_PYOPT_ALLOW_DRAFT75_DEFINITION)
try:
handshake.do_handshake(
request, _dispatcher, allowDraft75=allow_draft75)
except handshake.VersionException, e:
request.log_error(
'mod_pywebsocket: Handshake failed for version error: %s' % e,
apache.APLOG_INFO)
request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
e.supported_versions)
return apache.HTTP_BAD_REQUEST
except handshake.HandshakeException, e:
# Handshake for ws/wss failed.
# Send http response with error status.
request.log_error(
'mod_pywebsocket: Handshake failed for error: %s' % e,
apache.APLOG_INFO)
return e.status
handshake_is_done = True
request._dispatcher = _dispatcher
_dispatcher.transfer_data(request)
except handshake.AbortedByUserException, e:
request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
except Exception, e:
# DispatchException can also be thrown if something is wrong in
# pywebsocket code. It's caught here, then.
request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
(e, util.get_stack_trace()),
apache.APLOG_ERR)
# Unknown exceptions before handshake mean Apache must handle its
# request with another handler.
if not handshake_is_done:
return apache.DECLINED
# Set assbackwards to suppress response header generation by Apache.
request.assbackwards = 1
return apache.DONE # Return DONE such that no other handlers are invoked.
# vi:sts=4 sw=4 et
| mpl-2.0 |
divya-csekar/flask-microblog-server | flask/Lib/site-packages/sqlalchemy/engine/base.py | 22 | 70318 | # engine/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
"""Defines :class:`.Connection` and :class:`.Engine`.
"""
import sys
from .. import exc, util, log, interfaces
from ..sql import util as sql_util
from .interfaces import Connectable, ExceptionContext
from .util import _distill_params
import contextlib
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
    from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
def __init__(self, engine, connection=None, close_with_result=False,
_branch=False, _execution_options=None,
_dispatch=None,
_has_events=None):
"""Construct a new Connection.
        The constructor here is not public and is called only by an
:class:`.Engine`. See :meth:`.Engine.connect` and
:meth:`.Engine.contextual_connect` methods.
"""
self.engine = engine
self.dialect = engine.dialect
self.__connection = connection or engine.raw_connection()
self.__transaction = None
self.should_close_with_result = close_with_result
self.__savepoint_seq = 0
self.__branch = _branch
self.__invalid = False
self.__can_reconnect = True
if _dispatch:
self.dispatch = _dispatch
elif _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events)
self._echo = self.engine._should_log_info()
if _execution_options:
self._execution_options =\
engine._execution_options.union(_execution_options)
else:
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, _branch)
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
This is used to execute "sub" statements within a single execution,
usually an INSERT statement.
"""
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch=True,
_has_events=self._has_events,
_dispatch=self.dispatch)
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def execution_options(self, **opt):
""" Set non-SQL options for the connection which take effect
during execution.
The method returns a copy of this :class:`.Connection` which references
the same underlying DBAPI connection, but also defines the given
execution options which will take effect for a call to
:meth:`execute`. As the new :class:`.Connection` references the same
underlying resource, it's usually a good idea to ensure that the copies
will be discarded immediately, which is implicit if used as in::
result = connection.execution_options(stream_results=True).\\
execute(stmt)
Note that any key/value can be passed to
:meth:`.Connection.execution_options`, and it will be stored in the
``_execution_options`` dictionary of the :class:`.Connection`. It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such), and an explicit
transaction is not in progress.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`.Connection` compiles a clause
expression into a :class:`.Compiled` object.
It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param isolation_level: Available on: Connection.
Set the transaction isolation level for
the lifespan of this connection. Valid values include
those string values accepted by the ``isolation_level``
parameter passed to :func:`.create_engine`, and are
database specific, including those for :ref:`sqlite_toplevel`,
:ref:`postgresql_toplevel` - see those dialect's documentation
for further info.
Note that this option necessarily affects the underlying
DBAPI connection for the lifespan of the originating
:class:`.Connection`, and is not per-execution. This
setting is not removed until the underlying DBAPI connection
is returned to the connection pool, i.e.
the :meth:`.Connection.close` method is called.
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
.. versionadded:: 0.7.6
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2 dialect.
"""
c = self._clone()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
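# A minimal usage sketch of the options described above, assuming an
# in-memory SQLite engine (any supported URL would do); stream_results
# is honored only by dialects that support it, as noted in the docstring.
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
conn = engine.connect()
result = conn.execution_options(stream_results=True).execute("select 1")
print(result.scalar())  # 1
conn.close()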
@property
def closed(self):
"""Return True if this connection is closed."""
return '_Connection__connection' not in self.__dict__ \
and not self.__can_reconnect
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
return self.__invalid
@property
def connection(self):
"The underlying DB-API connection managed by this Connection."
try:
return self.__connection
except AttributeError:
return self._revalidate_connection()
def _revalidate_connection(self):
if self.__can_reconnect and self.__invalid:
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Can't reconnect until invalid "
"transaction is rolled back")
self.__connection = self.engine.raw_connection()
self.__invalid = False
return self.__connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _connection_is_valid(self):
# use getattr() for is_valid to support exceptions raised in
# dialect initializer, where the connection is not wrapped in
# _ConnectionFairy
return getattr(self.__connection, 'is_valid', False)
@property
def _still_open_and_connection_is_valid(self):
return \
not self.closed and \
not self.invalidated and \
getattr(self.__connection, 'is_valid', False)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`.Connection`.
"""
return self.connection.info
def connect(self):
"""Returns a branched version of this :class:`.Connection`.
The :meth:`.Connection.close` method on the returned
:class:`.Connection` can be called and this
:class:`.Connection` will remain open.
This method provides usage symmetry with
:meth:`.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
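# A short sketch of branching, assuming an in-memory SQLite engine: the
# branch shares the parent's DBAPI connection, and closing the branch
# leaves the parent Connection open.
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
parent = engine.connect()
branch = parent.connect()        # branched Connection
branch.execute("select 1")
branch.close()                   # parent remains usable
assert not parent.closed
parent.close()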
def contextual_connect(self, **kwargs):
"""Returns a branched version of this :class:`.Connection`.
The :meth:`.Connection.close` method on the returned
:class:`.Connection` can be called and this
:class:`.Connection` will remain open.
This method provides usage symmetry with
:meth:`.Engine.contextual_connect`, including for usage
with context managers.
"""
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`.Connection`.
The underlying DBAPI connection is literally closed (if
possible), and is discarded. Its source connection pool will
typically lazily create a new connection to replace it.
Upon the next use (where "use" typically means using the
:meth:`.Connection.execute` method or similar),
this :class:`.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`.Pool` as a source of connectivity (e.g. a "reconnection").
If a transaction was in progress (e.g. the
:meth:`.Connection.begin` method has been called) when the
:meth:`.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
continuing an ongoing transactional operation despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`.Connection.invalidate` method, just like auto-invalidation,
will at the connection pool level invoke the
:meth:`.PoolEvents.invalidate` event.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._connection_is_valid:
self.__connection.invalidate(exception)
del self.__connection
self.__invalid = True
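# A short sketch of the invalidation flow documented above, assuming an
# in-memory SQLite engine; the next use after invalidate() transparently
# procures a fresh DBAPI connection from the pool.
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
conn = engine.connect()
conn.invalidate()
assert conn.invalidated
conn.execute("select 1")         # triggers the "reconnection"
assert not conn.invalidated
conn.close()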
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute("SET search_path TO schema1, schema2")
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`.Connection` instance will remain usable. When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
See also:
:meth:`.Connection.begin_nested` - use a SAVEPOINT
:meth:`.Connection.begin_twophase` - use a two phase /XID transaction
:meth:`.Engine.begin` - context manager available from
:class:`.Engine`.
"""
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``; however, the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction as a whole.
See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_twophase`.
"""
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
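# A sketch of SAVEPOINT usage via begin_nested(), assuming a backend with
# SAVEPOINT support and a pre-existing table "t" (the URL mirrors the
# example used elsewhere in this module; pysqlite needs workarounds here).
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
conn = engine.connect()
trans = conn.begin()
conn.execute("insert into t (x) values (1)")
nested = conn.begin_nested()     # emits SAVEPOINT
conn.execute("insert into t (x) values (2)")
nested.rollback()                # rolls back to the savepoint only
trans.commit()                   # the first INSERT is committed
conn.close()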
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_nested`.
"""
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress.")
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
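# A sketch of a two-phase (XA) transaction, assuming a backend configured
# for two-phase support such as PostgreSQL, and the table "x" from the
# Transaction docstring example below.
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
conn = engine.connect()
xa = conn.begin_twophase()
conn.execute("insert into x (a, b) values (1, 2)")
xa.prepare()                     # phase one
xa.commit()                      # phase two
conn.close()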
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
return self.__transaction is not None
def _begin_impl(self, transaction):
if self._echo:
self.engine.logger.info("BEGIN (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self):
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_connection_is_valid:
if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if not self.__invalid and \
self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_impl(self, autocommit=False):
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if not self.__invalid and \
self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
def _savepoint_impl(self, name=None):
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = 'sa_savepoint_%s' % self.__savepoint_seq
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _rollback_to_savepoint_impl(self, name, context):
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
self.__transaction = context
def _release_savepoint_impl(self, name, context):
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
self.__transaction = context
def _begin_twophase_impl(self, transaction):
if self._echo:
self.engine.logger.info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _autorollback(self):
if not self.in_transaction():
self._rollback_impl()
def close(self):
"""Close this :class:`.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`.Pool` referenced
by the :class:`.Engine` that produced this
:class:`.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`.Connection`.
After :meth:`~.Connection.close` is called, the
:class:`.Connection` is permanently in a closed state,
and will allow no further operations.
"""
try:
conn = self.__connection
except AttributeError:
pass
else:
if not self.__branch:
conn.close()
if conn._reset_agent is self.__transaction:
conn._reset_agent = None
# the close() process can end up invalidating us,
# as the pool will call our transaction as the "reset_agent"
# for rollback(), which can then cause an invalidation
if not self.__invalid:
del self.__connection
self.__can_reconnect = False
self.__transaction = None
def scalar(self, object, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(object, *multiparams, **params).scalar()
def execute(self, object, *multiparams, **params):
"""Executes the a SQL statement construct and returns a
:class:`.ResultProxy`.
:param object: The statement to be executed. May be
one of:
* a plain string
* any :class:`.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`~.expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :attr:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
Note above, the usage of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
for details on paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`~.expression.text` construct.
"""
if isinstance(object, util.string_types[0]):
return self._execute_text(object, multiparams, params)
try:
meth = object._execute_on_connection
except AttributeError:
raise exc.InvalidRequestError(
"Unexecutable object type: %s" %
type(object))
else:
return meth(self, multiparams, params)
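# A minimal sketch of the calling styles documented above, assuming an
# in-memory SQLite engine and a throwaway table.
from sqlalchemy import create_engine, text
engine = create_engine("sqlite://")
conn = engine.connect()
conn.execute("create table t (id integer, value varchar(20))")
# executemany via a collection of dictionaries
conn.execute(
    text("insert into t (id, value) values (:id, :value)"),
    {"id": 1, "value": "v1"},
    {"id": 2, "value": "v2"},
)
print(conn.execute("select count(*) from t").scalar())  # 2
conn.close()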
def _execute_function(self, func, multiparams, params):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(func.select(),
multiparams, params)
def _execute_default(self, default, multiparams, params):
"""Execute a schema.ColumnDefault object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
default, multiparams, params = \
fn(self, default, multiparams, params)
try:
try:
conn = self.__connection
except AttributeError:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(
dialect, self, conn)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
default, multiparams, params, ret)
return ret
def _execute_ddl(self, ddl, multiparams, params):
"""Execute a schema.DDL object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
ddl, multiparams, params = \
fn(self, ddl, multiparams, params)
dialect = self.dialect
compiled = ddl.compile(dialect=dialect)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
compiled
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
ddl, multiparams, params, ret)
return ret
def _execute_clauseelement(self, elem, multiparams, params):
"""Execute a sql.ClauseElement object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
elem, multiparams, params = \
fn(self, elem, multiparams, params)
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# note this is usually a dict, but we support RowProxy
# as well; dict.keys() as an iterable is OK
keys = distilled_params[0].keys()
else:
keys = []
dialect = self.dialect
if 'compiled_cache' in self._execution_options:
key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1
if key in self._execution_options['compiled_cache']:
compiled_sql = self._execution_options['compiled_cache'][key]
else:
compiled_sql = elem.compile(
dialect=dialect, column_keys=keys,
inline=len(distilled_params) > 1)
self._execution_options['compiled_cache'][key] = compiled_sql
else:
compiled_sql = elem.compile(
dialect=dialect, column_keys=keys,
inline=len(distilled_params) > 1)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
compiled_sql, distilled_params
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
elem, multiparams, params, ret)
return ret
def _execute_compiled(self, compiled, multiparams, params):
"""Execute a sql.Compiled object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
compiled, multiparams, params = \
fn(self, compiled, multiparams, params)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
parameters,
compiled, parameters
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
compiled, multiparams, params, ret)
return ret
def _execute_text(self, statement, multiparams, params):
"""Execute a string SQL statement."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
statement, multiparams, params = \
fn(self, statement, multiparams, params)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
parameters,
statement, parameters
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
statement, multiparams, params, ret)
return ret
def _execute_context(self, dialect, constructor,
statement, parameters,
*args):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`.ResultProxy`."""
try:
try:
conn = self.__connection
except AttributeError:
conn = self._revalidate_connection()
context = constructor(dialect, self, conn, *args)
except Exception as e:
self._handle_dbapi_exception(e,
util.text_type(statement), parameters,
None, None)
if context.compiled:
context.pre_exec()
cursor, statement, parameters = context.cursor, \
context.statement, \
context.parameters
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = \
fn(self, cursor, statement, parameters,
context, context.executemany)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info(
"%r",
sql_util._repr_params(parameters, batches=10)
)
try:
if context.executemany:
for fn in () if not self.dialect._has_events \
else self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_executemany(
cursor,
statement,
parameters,
context)
elif not parameters and context.no_parameters:
for fn in () if not self.dialect._has_events \
else self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
break
else:
self.dialect.do_execute_no_params(
cursor,
statement,
context)
else:
for fn in () if not self.dialect._has_events \
else self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(
cursor,
statement,
parameters,
context)
except Exception as e:
self._handle_dbapi_exception(
e,
statement,
parameters,
cursor,
context)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(self, cursor,
statement,
parameters,
context,
context.executemany)
if context.compiled:
context.post_exec()
if context.isinsert and not context.executemany:
context.post_insert()
# create a resultproxy, get rowcount/implicit RETURNING
# rows, close cursor if no further results pending
result = context.get_result_proxy()
if context.isinsert:
if context._is_implicit_returning:
context._fetch_implicit_returning(result)
result.close(_autoclose_connection=False)
result._metadata = None
elif not context._is_explicit_returning:
result.close(_autoclose_connection=False)
result._metadata = None
elif context.isupdate and context._is_implicit_returning:
context._fetch_implicit_update_returning(result)
result.close(_autoclose_connection=False)
result._metadata = None
elif result._metadata is None:
# no results, get rowcount
# (which requires an open cursor on some drivers
# such as kinterbasdb, mxodbc),
result.rowcount
result.close(_autoclose_connection=False)
if self.__transaction is None and context.should_autocommit:
self._commit_impl(autocommit=True)
if result.closed and self.should_close_with_result:
self.close()
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = \
fn(self, cursor, statement, parameters,
context,
False)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
for fn in () if not self.dialect._has_events \
else self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(
cursor,
statement,
parameters,
context)
except Exception as e:
self._handle_dbapi_exception(
e,
statement,
parameters,
cursor,
context)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(self, cursor,
statement,
parameters,
context,
False)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(self,
e,
statement,
parameters,
cursor,
context):
exc_info = sys.exc_info()
if context and context.exception is None:
context.exception = e
if not self._is_disconnect:
self._is_disconnect = \
isinstance(e, self.dialect.dbapi.Error) and \
not self.closed and \
self.dialect.is_disconnect(e, self.__connection, cursor)
if context:
context.is_disconnect = self._is_disconnect
if self._reentrant_error:
util.raise_from_cause(
exc.DBAPIError.instance(statement,
parameters,
e,
self.dialect.dbapi.Error),
exc_info
)
self._reentrant_error = True
try:
# wrap DBAPI errors; wrap a non-DBAPI error only if a
# string statement is present and no context was created
should_wrap = isinstance(e, self.dialect.dbapi.Error) or \
(statement is not None and context is None)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
connection_invalidated=self._is_disconnect)
else:
sqlalchemy_exception = None
newraise = None
if self._has_events or self.engine._has_events:
# legacy dbapi_error event
if should_wrap and context:
self.dispatch.dbapi_error(self,
cursor,
statement,
parameters,
context,
e)
# new handle_error event
ctx = ExceptionContextImpl(
e, sqlalchemy_exception, self, cursor, statement,
parameters, context, self._is_disconnect)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and \
self._is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = \
self._is_disconnect = ctx.is_disconnect
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
self._autorollback()
if newraise:
util.raise_from_cause(newraise, exc_info)
elif should_wrap:
util.raise_from_cause(
sqlalchemy_exception,
exc_info
)
else:
util.reraise(*exc_info)
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
dbapi_conn_wrapper = self.connection
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
self.close()
def default_schema_name(self):
return self.engine.dialect.get_default_schema_name(self)
def transaction(self, callable_, *args, **kwargs):
"""Execute the given function within a transaction boundary.
The function is passed this :class:`.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Connection.begin`::
with conn.begin():
conn.execute("some statement", {'x':5, 'y':10})
As well as with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
See also:
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Engine.transaction` - engine-level version of
:meth:`.Connection.transaction`
"""
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
def run_callable(self, callable_, *args, **kwargs):
"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Engine.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
return callable_(self, *args, **kwargs)
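# A sketch of the Connection/Engine symmetry noted above, assuming an
# in-memory SQLite engine: the same callable works with either object.
from sqlalchemy import create_engine

def ping(conn):
    return conn.execute("select 1").scalar()

engine = create_engine("sqlite://")
print(engine.run_callable(ping))     # Engine flavor
with engine.connect() as conn:
    print(conn.run_callable(ping))   # Connection flavor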
def _run_visitor(self, visitorcallable, element, **kwargs):
visitorcallable(self.dialect, self,
**kwargs).traverse_single(element)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(self, exception, sqlalchemy_exception,
connection, cursor, statement, parameters,
context, is_disconnect):
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
class Transaction(object):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`~.Connection.begin` method of
:class:`.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute("insert into x (a, b) values (1, 2)")
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`.Connection.begin` method::
with connection.begin():
connection.execute("insert into x (a, b) values (1, 2)")
The Transaction object is **not** threadsafe.
See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`,
:meth:`.Connection.begin_nested`.
.. index::
single: thread safety; Transaction
"""
def __init__(self, connection, parent):
self.connection = connection
self._parent = parent or self
self.is_active = True
def close(self):
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will be rolled back. Otherwise, the
method simply returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if not self._parent.is_active:
return
if self._parent is self:
self.rollback()
def rollback(self):
"""Roll back this :class:`.Transaction`.
"""
if not self._parent.is_active:
return
self._do_rollback()
self.is_active = False
def _do_rollback(self):
self._parent.rollback()
def commit(self):
"""Commit this :class:`.Transaction`."""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
self.is_active = False
def _do_commit(self):
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None and self.is_active:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
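# A sketch of the __exit__ behavior implemented above, assuming an
# in-memory SQLite engine: commit on clean exit, rollback on error.
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
conn = engine.connect()
conn.execute("create table t (x integer)")
try:
    with conn.begin():
        conn.execute("insert into t (x) values (1)")
        raise RuntimeError("boom")   # __exit__ rolls the transaction back
except RuntimeError:
    pass
print(conn.execute("select count(*) from t").scalar())  # 0
conn.close()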
class RootTransaction(Transaction):
def __init__(self, connection):
super(RootTransaction, self).__init__(connection, None)
self.connection._begin_impl(self)
def _do_rollback(self):
if self.is_active:
self.connection._rollback_impl()
def _do_commit(self):
if self.is_active:
self.connection._commit_impl()
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
A new :class:`.NestedTransaction` object may be procured
using the :meth:`.Connection.begin_nested` method.
The interface is the same as that of :class:`.Transaction`.
"""
def __init__(self, connection, parent):
super(NestedTransaction, self).__init__(connection, parent)
self._savepoint = self.connection._savepoint_impl()
def _do_rollback(self):
if self.is_active:
self.connection._rollback_to_savepoint_impl(
self._savepoint, self._parent)
def _do_commit(self):
if self.is_active:
self.connection._release_savepoint_impl(
self._savepoint, self._parent)
class TwoPhaseTransaction(Transaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
"""
def __init__(self, connection, xid):
super(TwoPhaseTransaction, self).__init__(connection, None)
self._is_prepared = False
self.xid = xid
self.connection._begin_twophase_impl(self)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _do_rollback(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _do_commit(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
An :class:`.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
See also:
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = util.immutabledict()
_has_events = False
_connection_cls = Connection
def __init__(self, pool, dialect, url,
logging_name=None, echo=None, proxy=None,
execution_options=None
):
self.pool = pool
self.url = url
self.dialect = dialect
self.pool._dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.engine = self
log.instance_logger(self, echoflag=echo)
if proxy:
interfaces.ConnectionProxy._adapt_listener(self, proxy)
if execution_options:
self.update_execution_options(**execution_options)
def update_execution_options(self, **opt):
"""Update the default execution_options dictionary
of this :class:`.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`.create_engine`.
.. seealso::
:meth:`.Connection.execution_options`
:meth:`.Engine.execution_options`
"""
self._execution_options = \
self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`.Engine` that will provide
:class:`.Connection` objects with the given execution options.
The returned :class:`.Engine` remains related to the original
:class:`.Engine` in that it shares the same connection pool and
other state:
* The :class:`.Pool` used by the new :class:`.Engine` is the
same instance. The :meth:`.Engine.dispose` method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new :class:`.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`.Engine` individually.
* The logging configuration and logging_name are copied from the parent
:class:`.Engine`.
The intent of the :meth:`.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`.Connection` objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`.Connection.info` dictionary, which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
shards = {"default": "base", shard_1: "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. versionadded:: 0.8
.. seealso::
:meth:`.Connection.execution_options` - update execution options
on a :class:`.Connection` object.
:meth:`.Engine.update_execution_options` - update the execution
options for a given :class:`.Engine` in place.
"""
return OptionEngine(self, opt)
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return 'Engine(%r)' % self.url
def dispose(self):
"""Dispose of the connection pool used by this :class:`.Engine`.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested.
This method has two general use cases:
* When a dropped connection is detected, it is assumed that all
connections held by the pool are potentially dropped, and
the entire pool is replaced.
* An application may want to use :meth:`dispose` within a test
suite that is creating multiple engines.
It is critical to note that :meth:`dispose` does **not** guarantee
that the application will release all open database connections - only
those connections that are checked into the pool are closed.
Connections which remain checked out or have been detached from
the engine are not affected.
"""
self.pool.dispose()
self.pool = self.pool.recreate()
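# A short sketch of dispose(), assuming an in-memory SQLite engine: the
# pool is replaced wholesale and the engine stays usable afterwards.
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
engine.execute("select 1")
old_pool = engine.pool
engine.dispose()
assert engine.pool is not old_pool   # a fresh pool was created
engine.execute("select 1")           # connects lazily on demand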
def _execute_default(self, default):
with self.contextual_connect() as conn:
return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self.contextual_connect() as conn:
yield conn
else:
yield connection
def _run_visitor(self, visitorcallable, element,
connection=None, **kwargs):
with self._optional_conn_ctx_manager(connection) as conn:
conn._run_visitor(visitorcallable, element, **kwargs)
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
return self.conn
def __exit__(self, type, value, traceback):
if type is not None:
self.transaction.rollback()
else:
self.transaction.commit()
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute("insert into table (x, y, z) values (1, 2, 3)")
conn.execute("my_special_procedure(5)")
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
The ``close_with_result`` flag is normally ``False``, and indicates
that the :class:`.Connection` will be closed when the operation
is complete. When set to ``True``, it indicates the
:class:`.Connection` is in "single use" mode, where the
:class:`.ResultProxy` returned by the first call to
:meth:`.Connection.execute` will close the :class:`.Connection` when
that :class:`.ResultProxy` has exhausted all result rows.
.. versionadded:: 0.7.6
See also:
:meth:`.Engine.connect` - procure a :class:`.Connection` from
an :class:`.Engine`.
:meth:`.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`.Connection`.
"""
conn = self.contextual_connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
def transaction(self, callable_, *args, **kwargs):
"""Execute the given function within a transaction boundary.
The function is passed a :class:`.Connection` newly procured
from :meth:`.Engine.contextual_connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
See also:
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Connection.transaction` - connection-level version of
:meth:`.Engine.transaction`
"""
with self.contextual_connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
def run_callable(self, callable_, *args, **kwargs):
"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Connection.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
with self.contextual_connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a :class:`.ResultProxy`.
The arguments are the same as those used by
:meth:`.Connection.execute`.
Here, a :class:`.Connection` is acquired using the
:meth:`~.Engine.contextual_connect` method, and the statement executed
with that connection. The returned :class:`.ResultProxy` is flagged
such that when the :class:`.ResultProxy` is exhausted and its
underlying cursor is closed, the :class:`.Connection` created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self.contextual_connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
def scalar(self, statement, *multiparams, **params):
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
connection = self.contextual_connect(close_with_result=True)
return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
connection = self.contextual_connect(close_with_result=True)
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, **kwargs):
"""Return a new :class:`.Connection` object.
The :class:`.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`.Pool`
referenced by this :class:`.Engine`. When the
:meth:`~.Connection.close` method of the :class:`.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`~.Engine.connect`.
"""
return self._connection_cls(self, **kwargs)
def contextual_connect(self, close_with_result=False, **kwargs):
"""Return a :class:`.Connection` object which may be part of some
ongoing context.
By default, this method does the same thing as :meth:`.Engine.connect`.
Subclasses of :class:`.Engine` may override this method
to provide contextual behavior.
:param close_with_result: When True, the first :class:`.ResultProxy`
created by the :class:`.Connection` will call the
:meth:`.Connection.close` method of that connection as soon as any
pending result rows are exhausted. This is used to supply the
"connectionless execution" behavior provided by the
:meth:`.Engine.execute` method.
"""
return self._connection_cls(self,
self.pool.connect(),
close_with_result=close_with_result,
**kwargs)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection. Default is
the ``contextual_connect`` for this ``Engine``.
"""
with self._optional_conn_ctx_manager(connection) as conn:
if not schema:
schema = self.dialect.default_schema_name
return self.dialect.get_table_names(conn, schema)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
return self.run_callable(self.dialect.has_table, table_name, schema)
def raw_connection(self):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations. In most situations, the :class:`.Connection`
object should be used, which is procured using the
:meth:`.Engine.connect` method.
"""
return self.pool.unique_connection()
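# A sketch of raw_connection(), assuming an in-memory SQLite engine; the
# proxied close() checks the DBAPI connection back into the pool.
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
raw = engine.raw_connection()
cursor = raw.cursor()
cursor.execute("select 1")
print(cursor.fetchone())             # (1,)
cursor.close()
raw.close()                          # returned to the pool, not closed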
class OptionEngine(Engine):
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
log.instance_logger(self, echoflag=self.echo)
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or \
self.__dict__.get('_has_events', False)
def _set_has_events(self, value):
self.__dict__['_has_events'] = value
_has_events = property(_get_has_events, _set_has_events)
| bsd-3-clause |
pasqualguerrero/django | tests/schema/tests.py | 32 | 71673 | import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField,
TextField, TimeField,
)
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.db.transaction import atomic
from django.test import TransactionTestCase, skipIfDBFeature
from .fields import (
CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor,
BookWithSlug, IntegerPK, Note, NoteRename, Tag, TagIndexed, TagM2MTest,
TagUniqueRename, Thing, UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note,
Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if 'schema' in new_apps.all_models:
for model in self.local_models:
del new_apps.all_models['schema'][model._meta.model_name]
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.table_name_converter
with connection.cursor() as cursor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names(cursor)
for model in itertools.chain(SchemaTests.models, self.local_models):
# Remove any M2M tables first
for field in model._meta.local_many_to_many:
with atomic():
tbl = converter(field.remote_field.through._meta.db_table)
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
# Then remove the main tables
with atomic():
tbl = converter(model._meta.db_table)
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
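# A minimal sketch of how the helpers above pair with the schema editor,
# assuming the Author model from .models (the same pattern the tests
# below follow):
#
#     with connection.schema_editor() as editor:
#         editor.create_model(Author)
#     indexes = self.get_indexes(Author._meta.db_table)
#     constraints = self.get_constraints(Author._meta.db_table)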
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk_db_constraint(self):
"Tests that the db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Check that initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
# Check that BookWeak doesn't have an FK constraint
constraints = self.get_constraints(BookWeak._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.fail("FK constraint for author_id found")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for tag_id found")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Check that initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
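# Sketch of what "temporary default" means here: most backends add the NOT
# NULL column with a default so existing rows can be filled, then drop the
# default again, since Django does not persist defaults in the database
# (details vary per backend):
#
#     ALTER TABLE "schema_author" ADD COLUMN "surname" varchar(30) DEFAULT 'Godwin' NOT NULL;
#     ALTER TABLE "schema_author" ALTER COLUMN "surname" DROP DEFAULT;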
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns['awesome'][0]
self.assertEqual(field_type, connection.features.introspected_boolean_field_type(new_field, created_separately=True))
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add the field with a default it needs to cast (to an integer in this case)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns['thing']
self.assertEqual(field_type, 'IntegerField')
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
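# Worked example of the assertion above: {1: 2} is not a valid integer
# default, so get_prep_value() is applied first, len({1: 2}) == 1, and both
# pre-existing rows therefore end up with thing = 1.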
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# MySQL annoyingly uses the same column type for both, so introspection
# may report either of these two field types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
@unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b'123')
new_field.set_attributes_from_name('bits')
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns['bits'][0], "TextField")
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
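# Note on the pattern above: alter_field() diffs the old and new field
# definitions and only emits the DDL that is actually needed, so a sketch of
# a pure type change looks like (names illustrative; on PostgreSQL this is
# roughly ALTER TABLE ... ALTER COLUMN "name" TYPE text):
#
#     with connection.schema_editor() as editor:
#         editor.alter_field(Author, old_field, new_field, strict=True)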
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05')
old_field = Note._meta.get_field('info')
new_field = DateField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05 3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = TimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='aaa')
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='bbb')
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns['height'][1][6])
# Create some test data
Author.objects.create(name='Not null author', height=12)
Author.objects.create(name='Null author')
# Verify null value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertIsNone(Author.objects.get(name='Null author').height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertFalse(columns['height'][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertEqual(Author.objects.get(name='Null author').height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field('name')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field('info')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field)
@unittest.skipUnless(connection.features.supports_combined_alters, "No combined ALTER support")
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns['height'][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns['height'][1][6])
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for name, details in constraints.items():
if details['foreign_key']:
self.fail('Found an unexpected FK constraint to %s' % details['columns'])
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
constraints = self.get_constraints(LocalBook._meta.db_table)
# Ensure FK constraint exists
for name, details in constraints.items():
if details['foreign_key'] and details['columns'] == ["author_id"]:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
BookWithO2O.objects.all().delete()
# Make sure the FK constraint is present
constraints = self.get_constraints(BookWithO2O._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique anymore
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
# Make sure the FK constraint is still present
constraints = self.get_constraints(Book._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
Book.objects.all().delete()
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique now
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
# Make sure the FK constraint is present
constraints = self.get_constraints(BookWithO2O._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name='Foo')
def test_alter_int_pk_to_autofield_pk(self):
"""
Should be able to alter an IntegerField(primary_key=True) into an
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field('i')
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
Should be able to alter an IntegerField(primary_key=True) into an
IntegerField(unique=True).
"""
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = 'INTEGERPK'
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# model requires a new PK
old_field = IntegerPK._meta.get_field('j')
new_field = IntegerField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('j')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
old_field = IntegerPK._meta.get_field('i')
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns['detail_info'][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [
LocalBookWithM2M,
LocalBookWithM2M._meta.get_field('tags').remote_field.through,
]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough")
tag = ForeignKey("schema.TagM2MTest")
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(columns['book_id'][0], "IntegerField")
self.assertEqual(columns['tag_id'][0], "IntegerField")
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
self.local_models += [new_field.remote_field.through]
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through)
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough")
tag = ForeignKey("schema.TagM2MTest")
class Meta:
app_label = 'schema'
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [
LocalBookWithM2M,
LocalBookWithM2M._meta.get_field('tags').remote_field.through,
]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
constraints = self.get_constraints(LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
break
else:
self.fail("No FK constraint for tagm2mtest_id found")
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
self.local_models += [new_field.remote_field.through]
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field)
# Ensure old M2M is gone
self.assertRaises(DatabaseError, self.column_classes, LocalBookWithM2M._meta.get_field("tags").remote_field.through)
# Ensure the new M2M exists and points to UniqueTest
constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
break
else:
self.fail("No FK constraint for uniquetest_id found")
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@unittest.skipUnless(connection.features.supports_column_check_constraints, "No check constraints")
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
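# alter_unique_together() takes the old and new unique_together values, so
# adding a single constraint from scratch is sketched as (names illustrative):
#
#     with connection.schema_editor() as editor:
#         editor.alter_unique_together(UniqueTest, [], [("year", "slug")])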
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author)
new_field.set_attributes_from_name('author')
editor.add_field(BookWithoutAuthor, new_field)
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the slug/title columns first
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure there's no index_together to begin with
self.assertEqual(Book._meta.index_together, ())
# Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [['author', 'title']], [])
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
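# Sketch of the rename pattern above: alter_db_table() only renames the
# table, so the in-memory Meta must be updated by hand, as the test does:
#
#     with connection.schema_editor() as editor:
#         editor.alter_db_table(Author, "schema_author", "schema_author_new")
#     Author._meta.db_table = "schema_author_new"  # keep the model in sync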
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column, verify that creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
)
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
'id',
self.get_indexes(Tag._meta.db_table),
)
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
)
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(AuthorWithEvenLongerName, related_name="something")
new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
new_field = ForeignObject(Author, from_fields=['author_id'], to_fields=['id'])
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after a SQL reserved word: %s" % e)
# Check that it's there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Thing.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, **kwargs):
kwargs['db_column'] = "CamelCase"
field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
editor.execute(
editor.sql_create_index % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseIndex"),
"columns": editor.quote_name(column),
"extra": "",
}
)
editor.alter_field(model, get_field(db_index=True), field)
editor.execute(
editor.sql_create_unique % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseUniqConstraint"),
"columns": editor.quote_name(field.column),
}
)
editor.alter_field(model, get_field(unique=True), field)
editor.execute(
editor.sql_create_fk % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseFKConstraint"),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
}
)
editor.alter_field(model, get_field(Author, field_class=ForeignKey), field)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default='surname default')
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], 'surname default')
# And that the default is no longer set in the database.
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
self.assertEqual(Author.objects.get().height, None)
old_field = Author._meta.get_field('height')
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name('height')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
| bsd-3-clause |
ahmed-mahran/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/gdal/geometries.py | 209 | 25898 | """
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/ogr/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5))'
>>> pnt = OGRGeometry(wkt1)
>>> print(pnt)
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print(mpnt)
MULTIPOINT (-90 30,-90 30)
>>> print(mpnt.srs.name)
WGS 84
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform(SpatialReference('NAD27'))
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print(mpnt)
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class makes it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print(gt1 == 3, gt1 == 'Polygon') # Equivalence works w/non-OGRGeomType objects
True True
"""
# Python library requisites.
import sys
from binascii import a2b_hex, b2a_hex
from ctypes import byref, string_at, c_char_p, c_double, c_ubyte, c_void_p
from django.contrib.gis import memoryview
# Getting GDAL prerequisites
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.libgdal import GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
# Getting the ctypes prototype functions that interface w/the GDAL C library.
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
from django.utils import six
from django.utils.six.moves import xrange
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
#### OGRGeometry Class ####
class OGRGeometry(GDALBase):
"Generally encapsulates an OGR geometry."
def __init__(self, geom_input, srs=None):
"Initializes Geometry on either WKT or an OGR pointer as input."
str_instance = isinstance(geom_input, six.string_types)
# If HEX, unpack input to a binary buffer.
if str_instance and hex_regex.match(geom_input):
geom_input = memoryview(a2b_hex(geom_input.upper().encode()))
str_instance = False
# Constructing the geometry,
if str_instance:
wkt_m = wkt_regex.match(geom_input)
json_m = json_regex.match(geom_input)
if wkt_m:
if wkt_m.group('srid'):
# If there's EWKT, set the SRS w/value of the SRID.
srs = int(wkt_m.group('srid'))
if wkt_m.group('type').upper() == 'LINEARRING':
# OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
# See http://trac.osgeo.org/gdal/ticket/1992.
g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt').encode())))
else:
g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt').encode())), None, byref(c_void_p()))
elif json_m:
g = capi.from_json(geom_input.encode())
else:
# Seeing if the input is a valid short-hand string
# (e.g., 'Point', 'POLYGON').
ogr_t = OGRGeomType(geom_input)
g = capi.create_geom(ogr_t.num)
elif isinstance(geom_input, memoryview):
# WKB was passed in
g = capi.from_wkb(bytes(geom_input), None, byref(c_void_p()), len(geom_input))
elif isinstance(geom_input, OGRGeomType):
# OGRGeomType was passed in, an empty geometry will be created.
g = capi.create_geom(geom_input.num)
elif isinstance(geom_input, self.ptr_type):
# OGR pointer (c_void_p) was the input.
g = geom_input
else:
raise OGRException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
# Now checking the Geometry pointer before finishing initialization
# by setting the pointer for the object.
if not g:
raise OGRException('Cannot create OGR Geometry from input: %s' % str(geom_input))
self.ptr = g
# Assigning the SpatialReference object to the geometry, if valid.
if bool(srs): self.srs = srs
# Setting the class depending upon the OGR Geometry Type
self.__class__ = GEO_CLASSES[self.geom_type.num]
def __del__(self):
"Deletes this Geometry."
if self._ptr: capi.destroy_geom(self._ptr)
# Pickle routines
def __getstate__(self):
srs = self.srs
if srs:
srs = srs.wkt
else:
srs = None
return bytes(self.wkb), srs
def __setstate__(self, state):
wkb, srs = state
ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
if not ptr: raise OGRException('Invalid OGRGeometry loaded from pickled state.')
self.ptr = ptr
self.srs = srs
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return OGRGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
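# Doctest-style sketch of from_bbox (exact WKT whitespace can vary between
# GDAL versions):
# >>> OGRGeometry.from_bbox((0, 0, 1, 1)).wkt
# 'POLYGON ((0 0,0 1,1 1,1 0,0 0))'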
### Geometry set-like operations ###
# g = g1 | g2
def __or__(self, other):
"Returns the union of the two geometries."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
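# The operators above just delegate to the named methods, e.g. (a sketch):
# >>> a = OGRGeometry('POLYGON((0 0, 2 0, 2 2, 0 2, 0 0))')
# >>> b = OGRGeometry('POLYGON((1 1, 3 1, 3 3, 1 3, 1 1))')
# >>> (a & b) == a.intersection(b)
# True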
def __eq__(self, other):
"Is this Geometry equal to the other?"
if isinstance(other, OGRGeometry):
return self.equals(other)
else:
return False
def __ne__(self, other):
"Tests for inequality."
return not (self == other)
def __str__(self):
"WKT is used for the string representation."
return self.wkt
#### Geometry Properties ####
@property
def dimension(self):
"Returns 0 for points, 1 for lines, and 2 for surfaces."
return capi.get_dims(self.ptr)
def _get_coord_dim(self):
"Returns the coordinate dimension of the Geometry."
if isinstance(self, GeometryCollection) and GDAL_VERSION < (1, 5, 2):
# On GDAL versions prior to 1.5.2, there exists a bug in which
# the coordinate dimension of geometry collections is always 2:
# http://trac.osgeo.org/gdal/ticket/2334
# Here we workaround by returning the coordinate dimension of the
# first geometry in the collection instead.
if len(self):
return capi.get_coord_dim(capi.get_geom_ref(self.ptr, 0))
return capi.get_coord_dim(self.ptr)
def _set_coord_dim(self, dim):
"Sets the coordinate dimension of this Geometry."
if dim not in (2, 3):
raise ValueError('Geometry dimension must be either 2 or 3')
capi.set_coord_dim(self.ptr, dim)
coord_dim = property(_get_coord_dim, _set_coord_dim)
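# Sketch: coord_dim is both readable and writable, so forcing a 2D geometry
# to 3D is a plain assignment (anything but 2 or 3 raises ValueError):
# >>> pnt = OGRGeometry('POINT (1 2)')
# >>> pnt.coord_dim = 3
# >>> pnt.coord_dim
# 3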
@property
def geom_count(self):
"The number of elements in this Geometry."
return capi.get_geom_count(self.ptr)
@property
def point_count(self):
"Returns the number of Points in this Geometry."
return capi.get_point_count(self.ptr)
@property
def num_points(self):
"Alias for `point_count` (same name method in GEOS API.)"
return self.point_count
@property
def num_coords(self):
"Alais for `point_count`."
return self.point_count
@property
def geom_type(self):
"Returns the Type for this Geometry."
return OGRGeomType(capi.get_geom_type(self.ptr))
@property
def geom_name(self):
"Returns the Name of this Geometry."
return capi.get_geom_name(self.ptr)
@property
def area(self):
"Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
return capi.get_area(self.ptr)
@property
def envelope(self):
"Returns the envelope for this Geometry."
# TODO: Fix Envelope() for Point geometries.
return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
@property
def extent(self):
"Returns the envelope as a 4-tuple, instead of as an Envelope object."
return self.envelope.tuple
#### SpatialReference-related Properties ####
# The SRS property
def _get_srs(self):
"Returns the Spatial Reference for this Geometry."
try:
srs_ptr = capi.get_geom_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(srs_ptr))
except SRSException:
return None
def _set_srs(self, srs):
"Sets the SpatialReference for this geometry."
# Do not have to clone the `SpatialReference` object pointer because
# when it is assigned to this `OGRGeometry` its internal OGR
# reference count is incremented, and will likewise be released
# (decremented) when this geometry's destructor is called.
if isinstance(srs, SpatialReference):
srs_ptr = srs.ptr
elif isinstance(srs, six.integer_types + six.string_types):
sr = SpatialReference(srs)
srs_ptr = sr.ptr
else:
raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
capi.assign_srs(self.ptr, srs_ptr)
srs = property(_get_srs, _set_srs)
# The SRID property
def _get_srid(self):
srs = self.srs
        if srs:
            return srs.srid
return None
def _set_srid(self, srid):
if isinstance(srid, six.integer_types):
self.srs = srid
else:
raise TypeError('SRID must be set with an integer.')
srid = property(_get_srid, _set_srid)
#### Output Methods ####
@property
def geos(self):
"Returns a GEOSGeometry object from this OGRGeometry."
from django.contrib.gis.geos import GEOSGeometry
return GEOSGeometry(self.wkb, self.srid)
@property
def gml(self):
"Returns the GML representation of the Geometry."
return capi.to_gml(self.ptr)
@property
def hex(self):
"Returns the hexadecimal representation of the WKB (a string)."
return b2a_hex(self.wkb).upper()
@property
def json(self):
"""
Returns the GeoJSON representation of this Geometry.
"""
return capi.to_json(self.ptr)
geojson = json
@property
def kml(self):
"Returns the KML representation of the Geometry."
return capi.to_kml(self.ptr, None)
@property
def wkb_size(self):
"Returns the size of the WKB buffer."
return capi.get_wkbsize(self.ptr)
@property
def wkb(self):
"Returns the WKB representation of the Geometry."
if sys.byteorder == 'little':
byteorder = 1 # wkbNDR (from ogr_core.h)
else:
byteorder = 0 # wkbXDR
sz = self.wkb_size
# Creating the unsigned character buffer, and passing it in by reference.
buf = (c_ubyte * sz)()
wkb = capi.to_wkb(self.ptr, byteorder, byref(buf))
# Returning a buffer of the string at the pointer.
return memoryview(string_at(buf, sz))
@property
def wkt(self):
"Returns the WKT representation of the Geometry."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def ewkt(self):
"Returns the EWKT representation of the Geometry."
srs = self.srs
if srs and srs.srid:
return 'SRID=%s;%s' % (srs.srid, self.wkt)
else:
return self.wkt
#### Geometry Methods ####
def clone(self):
"Clones this OGR Geometry."
return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
def close_rings(self):
"""
If there are any rings within this geometry that have not been
closed, this routine will do so by adding the starting point at the
end.
"""
# Closing the open rings.
capi.geom_close_rings(self.ptr)
def transform(self, coord_trans, clone=False):
"""
Transforms this geometry to a different spatial reference system.
May take a CoordTransform object, a SpatialReference object, string
WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
and the geometry is transformed in-place. However, if the `clone`
keyword is set, then a transformed clone of this geometry will be
returned.
"""
if clone:
klone = self.clone()
klone.transform(coord_trans)
return klone
# Have to get the coordinate dimension of the original geometry
# so it can be used to reset the transformed geometry's dimension
        # afterwards. This is done because of a GDAL bug (in versions prior
# to 1.7) that turns geometries 3D after transformation, see:
# http://trac.osgeo.org/gdal/changeset/17792
if GDAL_VERSION < (1, 7):
orig_dim = self.coord_dim
# Depending on the input type, use the appropriate OGR routine
# to perform the transformation.
if isinstance(coord_trans, CoordTransform):
capi.geom_transform(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, SpatialReference):
capi.geom_transform_to(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, six.integer_types + six.string_types):
sr = SpatialReference(coord_trans)
capi.geom_transform_to(self.ptr, sr.ptr)
else:
raise TypeError('Transform only accepts CoordTransform, '
'SpatialReference, string, and integer objects.')
# Setting with original dimension, see comment above.
if GDAL_VERSION < (1, 7):
if isinstance(self, GeometryCollection):
                # With geometry collections, we have to set the dimension
                # on each internal geometry reference, as the collection's
                # dimension isn't affected.
for i in xrange(len(self)):
internal_ptr = capi.get_geom_ref(self.ptr, i)
if orig_dim != capi.get_coord_dim(internal_ptr):
capi.set_coord_dim(internal_ptr, orig_dim)
else:
if self.coord_dim != orig_dim:
self.coord_dim = orig_dim
def transform_to(self, srs):
"For backwards-compatibility."
self.transform(srs)
#### Topology Methods ####
def _topology(self, func, other):
"""A generalized function for topology operations, takes a GDAL function and
the other geometry to perform the operation on."""
if not isinstance(other, OGRGeometry):
raise TypeError('Must use another OGRGeometry object for topology operations!')
# Returning the output of the given function with the other geometry's
# pointer.
return func(self.ptr, other.ptr)
def intersects(self, other):
"Returns True if this geometry intersects with the other."
return self._topology(capi.ogr_intersects, other)
def equals(self, other):
"Returns True if this geometry is equivalent to the other."
return self._topology(capi.ogr_equals, other)
def disjoint(self, other):
"Returns True if this geometry and the other are spatially disjoint."
return self._topology(capi.ogr_disjoint, other)
def touches(self, other):
"Returns True if this geometry touches the other."
return self._topology(capi.ogr_touches, other)
def crosses(self, other):
"Returns True if this geometry crosses the other."
return self._topology(capi.ogr_crosses, other)
def within(self, other):
"Returns True if this geometry is within the other."
return self._topology(capi.ogr_within, other)
def contains(self, other):
"Returns True if this geometry contains the other."
return self._topology(capi.ogr_contains, other)
def overlaps(self, other):
"Returns True if this geometry overlaps the other."
return self._topology(capi.ogr_overlaps, other)
#### Geometry-generation Methods ####
def _geomgen(self, gen_func, other=None):
"A helper routine for the OGR routines that generate geometries."
if isinstance(other, OGRGeometry):
return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
else:
return OGRGeometry(gen_func(self.ptr), self.srs)
@property
def boundary(self):
"Returns the boundary of this geometry."
return self._geomgen(capi.get_boundary)
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points in
this Geometry.
"""
return self._geomgen(capi.geom_convex_hull)
def difference(self, other):
"""
Returns a new geometry consisting of the region which is the difference
of this geometry and the other.
"""
return self._geomgen(capi.geom_diff, other)
def intersection(self, other):
"""
Returns a new geometry consisting of the region of intersection of this
geometry and the other.
"""
return self._geomgen(capi.geom_intersection, other)
def sym_difference(self, other):
"""
Returns a new geometry which is the symmetric difference of this
geometry and the other.
"""
return self._geomgen(capi.geom_sym_diff, other)
def union(self, other):
"""
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
"""
return self._geomgen(capi.geom_union, other)
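# A minimal usage sketch for the API above (hypothetical coordinates;
# assumes GDAL is available and that OGRGeometry accepts WKT strings,
# as its constructor does elsewhere in this module):
def _ogr_geometry_demo():  # illustrative sketch only, never called
    a = OGRGeometry('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')
    b = OGRGeometry('POLYGON((1 1, 1 3, 3 3, 3 1, 1 1))')
    assert a.intersects(b)  # topology predicate via _topology()
    overlap = a & b         # operator form of a.intersection(b)
    merged = a | b          # operator form of a.union(b)
    return overlap.wkt, merged.area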
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
@property
def x(self):
"Returns the X coordinate for this Point."
return capi.getx(self.ptr, 0)
@property
def y(self):
"Returns the Y coordinate for this Point."
return capi.gety(self.ptr, 0)
@property
def z(self):
"Returns the Z coordinate for this Point."
if self.coord_dim == 3:
return capi.getz(self.ptr, 0)
@property
def tuple(self):
"Returns the tuple of this point."
if self.coord_dim == 2:
return (self.x, self.y)
elif self.coord_dim == 3:
return (self.x, self.y, self.z)
coords = tuple
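# Sketch: reading coordinates back from a Point (hypothetical values;
# assumes OGRGeometry downcasts WKT points to this Point subclass):
def _point_demo():  # illustrative sketch only, never called
    pnt = OGRGeometry('POINT(5 23)')
    return pnt.x, pnt.y, pnt.tuple  # expected: (5.0, 23.0, (5.0, 23.0))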
class LineString(OGRGeometry):
def __getitem__(self, index):
"Returns the Point at the given index."
        if 0 <= index < self.point_count:
x, y, z = c_double(), c_double(), c_double()
capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
dim = self.coord_dim
if dim == 1:
return (x.value,)
elif dim == 2:
return (x.value, y.value)
elif dim == 3:
return (x.value, y.value, z.value)
else:
raise OGRIndexError('index out of range: %s' % str(index))
def __iter__(self):
"Iterates over each point in the LineString."
for i in xrange(self.point_count):
yield self[i]
def __len__(self):
"The length returns the number of points in the LineString."
return self.point_count
@property
def tuple(self):
"Returns the tuple representation of this LineString."
return tuple([self[i] for i in xrange(len(self))])
coords = tuple
def _listarr(self, func):
"""
        Internal routine that returns a sequence (list) corresponding to
the given function.
"""
return [func(self.ptr, i) for i in xrange(len(self))]
@property
def x(self):
"Returns the X coordinates in a list."
return self._listarr(capi.getx)
@property
def y(self):
"Returns the Y coordinates in a list."
return self._listarr(capi.gety)
@property
def z(self):
"Returns the Z coordinates in a list."
if self.coord_dim == 3:
return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString): pass
class Polygon(OGRGeometry):
def __len__(self):
"The number of interior rings in this Polygon."
return self.geom_count
def __iter__(self):
"Iterates through each ring in the Polygon."
for i in xrange(self.geom_count):
yield self[i]
def __getitem__(self, index):
"Gets the ring at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
# Polygon Properties
@property
def shell(self):
"Returns the shell of this Polygon."
return self[0] # First ring is the shell
exterior_ring = shell
@property
def tuple(self):
"Returns a tuple of LinearRing coordinate tuples."
return tuple([self[i].tuple for i in xrange(self.geom_count)])
coords = tuple
@property
def point_count(self):
"The number of Points in this Polygon."
# Summing up the number of points in each ring of the Polygon.
return sum([self[i].point_count for i in xrange(self.geom_count)])
@property
def centroid(self):
"Returns the centroid (a Point) of this Polygon."
# The centroid is a Point, create a geometry for this.
p = OGRGeometry(OGRGeomType('Point'))
capi.get_centroid(self.ptr, p.ptr)
return p
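# Sketch: inspecting a Polygon's rings and centroid (hypothetical ring):
def _polygon_demo():  # illustrative sketch only, never called
    poly = OGRGeometry('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))')
    rings = [ring.tuple for ring in poly]  # __iter__ yields each ring
    return poly.shell.point_count, rings, poly.centroid.tuple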
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
"The Geometry Collection class."
def __getitem__(self, index):
"Gets the Geometry at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
def __iter__(self):
"Iterates over each Geometry."
for i in xrange(self.geom_count):
yield self[i]
def __len__(self):
"The number of geometries in this Geometry Collection."
return self.geom_count
def add(self, geom):
"Add the geometry to this Geometry Collection."
if isinstance(geom, OGRGeometry):
if isinstance(geom, self.__class__):
for g in geom: capi.add_geom(self.ptr, g.ptr)
else:
capi.add_geom(self.ptr, geom.ptr)
elif isinstance(geom, six.string_types):
tmp = OGRGeometry(geom)
capi.add_geom(self.ptr, tmp.ptr)
else:
raise OGRException('Must add an OGRGeometry.')
@property
def point_count(self):
"The number of Points in this Geometry Collection."
# Summing up the number of points in each geometry in this collection
return sum([self[i].point_count for i in xrange(self.geom_count)])
@property
def tuple(self):
"Returns a tuple representation of this Geometry Collection."
return tuple([self[i].tuple for i in xrange(self.geom_count)])
coords = tuple
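# Sketch: growing a collection with add(); per the method above, WKT
# strings and OGRGeometry instances are both accepted (hypothetical points):
def _collection_demo():  # illustrative sketch only, never called
    mpoint = OGRGeometry('MULTIPOINT(0 0)')
    mpoint.add('POINT(1 1)')
    mpoint.add(OGRGeometry('POINT(2 2)'))
    return len(mpoint), mpoint.point_count  # expected: (3, 3)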
# Multiple Geometry types.
class MultiPoint(GeometryCollection): pass
class MultiLineString(GeometryCollection): pass
class MultiPolygon(GeometryCollection): pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {1 : Point,
2 : LineString,
3 : Polygon,
4 : MultiPoint,
5 : MultiLineString,
6 : MultiPolygon,
7 : GeometryCollection,
101: LinearRing,
1 + OGRGeomType.wkb25bit : Point,
2 + OGRGeomType.wkb25bit : LineString,
3 + OGRGeomType.wkb25bit : Polygon,
4 + OGRGeomType.wkb25bit : MultiPoint,
5 + OGRGeomType.wkb25bit : MultiLineString,
6 + OGRGeomType.wkb25bit : MultiPolygon,
7 + OGRGeomType.wkb25bit : GeometryCollection,
}
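# Sketch of how the mapping is consumed: an OGRwkbGeometryType code
# (optionally with the 2.5D bit set) resolves to the matching subclass, e.g.
#   GEO_CLASSES[3]                          -> Polygon
#   GEO_CLASSES[3 + OGRGeomType.wkb25bit]   -> Polygon (2.5D variant)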
| apache-2.0 |
joachimmetz/dfvfs | tests/file_io/apfs_file_io.py | 2 | 4656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Apple File System (APFS) file-like object."""
import os
import unittest
from dfvfs.file_io import apfs_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests import test_lib as shared_test_lib
class APFSFileTest(shared_test_lib.BaseTestCase):
"""Tests the file-like object implementation using pyfsapfs.file_entry."""
_IDENTIFIER_ANOTHER_FILE = 21
_IDENTIFIER_PASSWORDS_TXT = 20
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(APFSFileTest, self).setUp()
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['apfs.raw'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
self._apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/apfs1',
parent=test_raw_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenCloseIdentifier(self):
"""Test the open and close functionality using an identifier."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._apfs_container_path_spec)
file_object = apfs_file_io.APFSFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
# TODO: add a failing scenario.
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
parent=self._apfs_container_path_spec)
file_object = apfs_file_io.APFSFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
# Try open with a path specification that has no parent.
path_spec.parent = None
file_object = apfs_file_io.APFSFile(self._resolver_context, path_spec)
with self.assertRaises(errors.PathSpecError):
file_object.Open()
def testSeek(self):
"""Test the seek functionality."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/a_directory/another_file',
identifier=self._IDENTIFIER_ANOTHER_FILE,
parent=self._apfs_container_path_spec)
file_object = apfs_file_io.APFSFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 22)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
    # Conforming to POSIX seek semantics, the offset can exceed the file
    # size, but reading will then return no data.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def testRead(self):
"""Test the read functionality."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
identifier=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._apfs_container_path_spec)
file_object = apfs_file_io.APFSFile(self._resolver_context, path_spec)
file_object.Open()
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password\n'
b'bank,joesmith,superrich\n'
b'alarm system,-,1234\n'
b'treasure chest,-,1111\n'
b'uber secret laire,admin,admin\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
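# A condensed sketch of the read flow exercised above (hypothetical
# arguments; uses only APIs already imported in this module):
def _read_passwords_sketch(resolver_context, container_path_spec):
  """Sketch: open /passwords.txt inside an APFS container and read it."""
  path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
      parent=container_path_spec)
  file_object = apfs_file_io.APFSFile(resolver_context, path_spec)
  file_object.Open()
  return file_object.read()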
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
drawks/ansible | lib/ansible/plugins/doc_fragments/influxdb.py | 25 | 2166 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Parameters for influxdb modules
DOCUMENTATION = r'''
options:
hostname:
description:
    - The hostname or IP address on which the InfluxDB server is listening.
    - Since Ansible 2.5, defaults to localhost.
type: str
default: localhost
username:
description:
- Username that will be used to authenticate against InfluxDB server.
- Alias C(login_username) added in Ansible 2.5.
type: str
default: root
aliases: [ login_username ]
password:
description:
- Password that will be used to authenticate against InfluxDB server.
- Alias C(login_password) added in Ansible 2.5.
type: str
default: root
aliases: [ login_password ]
port:
description:
    - The port on which the InfluxDB server is listening.
type: int
default: 8086
validate_certs:
description:
- If set to C(no), the SSL certificates will not be validated.
    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
type: bool
default: yes
version_added: "2.5"
ssl:
description:
- Use https instead of http to connect to InfluxDB server.
type: bool
version_added: "2.5"
timeout:
description:
    - Number of seconds Requests will wait for the client to establish a connection.
type: int
version_added: "2.5"
retries:
description:
- Number of retries client will try before aborting.
- C(0) indicates try until success.
type: int
default: 3
version_added: "2.5"
use_udp:
description:
- Use UDP to connect to InfluxDB server.
type: bool
version_added: "2.5"
udp_port:
description:
- UDP port to connect to InfluxDB server.
type: int
default: 4444
version_added: "2.5"
proxies:
description:
- HTTP(S) proxy to use for Requests to connect to InfluxDB server.
type: dict
version_added: "2.5"
'''
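# A consuming module pulls this fragment into its own DOCUMENTATION
# block via the standard mechanism (sketch; the module name is hypothetical):
#
#   DOCUMENTATION = '''
#   module: my_influxdb_module
#   extends_documentation_fragment:
#     - influxdb
#   '''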
| gpl-3.0 |
trademob/boto | boto/sts/credentials.py | 153 | 8210 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import datetime
import boto.utils
from boto.compat import json
class Credentials(object):
"""
:ivar access_key: The AccessKeyID.
:ivar secret_key: The SecretAccessKey.
:ivar session_token: The session token that must be passed with
requests to use the temporary credentials
:ivar expiration: The timestamp for when the credentials will expire
"""
def __init__(self, parent=None):
self.parent = parent
self.access_key = None
self.secret_key = None
self.session_token = None
self.expiration = None
self.request_id = None
@classmethod
def from_json(cls, json_doc):
"""
Create and return a new Session Token based on the contents
of a JSON document.
:type json_doc: str
:param json_doc: A string containing a JSON document with a
previously saved Credentials object.
"""
d = json.loads(json_doc)
token = cls()
token.__dict__.update(d)
return token
@classmethod
def load(cls, file_path):
"""
Create and return a new Session Token based on the contents
of a previously saved JSON-format file.
:type file_path: str
:param file_path: The fully qualified path to the JSON-format
file containing the previously saved Session Token information.
"""
fp = open(file_path)
json_doc = fp.read()
fp.close()
return cls.from_json(json_doc)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AccessKeyId':
self.access_key = value
elif name == 'SecretAccessKey':
self.secret_key = value
elif name == 'SessionToken':
self.session_token = value
elif name == 'Expiration':
self.expiration = value
elif name == 'RequestId':
self.request_id = value
else:
pass
def to_dict(self):
"""
Return a Python dict containing the important information
about this Session Token.
"""
return {'access_key': self.access_key,
'secret_key': self.secret_key,
'session_token': self.session_token,
'expiration': self.expiration,
'request_id': self.request_id}
def save(self, file_path):
"""
Persist a Session Token to a file in JSON format.
        :type file_path: str
        :param file_path: The fully qualified path to the file where the
            Session Token data should be written. Any previous
data in the file will be overwritten. To help protect
the credentials contained in the file, the permissions
of the file will be set to readable/writable by owner only.
"""
fp = open(file_path, 'w')
json.dump(self.to_dict(), fp)
fp.close()
os.chmod(file_path, 0o600)
def is_expired(self, time_offset_seconds=0):
"""
Checks to see if the Session Token is expired or not. By default
it will check to see if the Session Token is expired as of the
moment the method is called. However, you can supply an
optional parameter which is the number of seconds of offset
into the future for the check. For example, if you supply
        a value of 5, this method will return True if the Session
Token will be expired 5 seconds from this moment.
:type time_offset_seconds: int
:param time_offset_seconds: The number of seconds into the future
to test the Session Token for expiration.
"""
now = datetime.datetime.utcnow()
if time_offset_seconds:
now = now + datetime.timedelta(seconds=time_offset_seconds)
ts = boto.utils.parse_ts(self.expiration)
delta = ts - now
return delta.total_seconds() <= 0
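# Sketch of a save/load round trip (placeholder values and path):
def _credentials_demo():  # illustrative sketch only, never called
    creds = Credentials()
    creds.access_key = 'AKIA...'  # placeholder
    creds.secret_key = 'secret'   # placeholder
    creds.session_token = 'token'
    creds.expiration = '2011-01-01T00:00:00Z'  # ISO 8601, parsed by boto.utils
    creds.save('/tmp/sts_token.json')  # written with owner-only permissions
    restored = Credentials.load('/tmp/sts_token.json')
    return restored.is_expired()  # True, since the expiration is in the past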
class FederationToken(object):
"""
:ivar credentials: A Credentials object containing the credentials.
:ivar federated_user_arn: ARN specifying federated user using credentials.
:ivar federated_user_id: The ID of the federated user using credentials.
:ivar packed_policy_size: A percentage value indicating the size of
the policy in packed form
"""
def __init__(self, parent=None):
self.parent = parent
self.credentials = None
self.federated_user_arn = None
self.federated_user_id = None
self.packed_policy_size = None
self.request_id = None
def startElement(self, name, attrs, connection):
if name == 'Credentials':
self.credentials = Credentials()
return self.credentials
else:
return None
def endElement(self, name, value, connection):
if name == 'Arn':
self.federated_user_arn = value
elif name == 'FederatedUserId':
self.federated_user_id = value
elif name == 'PackedPolicySize':
self.packed_policy_size = int(value)
elif name == 'RequestId':
self.request_id = value
else:
pass
class AssumedRole(object):
"""
:ivar user: The assumed role user.
:ivar credentials: A Credentials object containing the credentials.
"""
def __init__(self, connection=None, credentials=None, user=None):
self._connection = connection
self.credentials = credentials
self.user = user
def startElement(self, name, attrs, connection):
if name == 'Credentials':
self.credentials = Credentials()
return self.credentials
elif name == 'AssumedRoleUser':
self.user = User()
return self.user
def endElement(self, name, value, connection):
pass
class User(object):
"""
:ivar arn: The arn of the user assuming the role.
:ivar assume_role_id: The identifier of the assumed role.
"""
def __init__(self, arn=None, assume_role_id=None):
self.arn = arn
self.assume_role_id = assume_role_id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Arn':
self.arn = value
elif name == 'AssumedRoleId':
self.assume_role_id = value
class DecodeAuthorizationMessage(object):
"""
:ivar request_id: The request ID.
:ivar decoded_message: The decoded authorization message (may be JSON).
"""
def __init__(self, request_id=None, decoded_message=None):
self.request_id = request_id
self.decoded_message = decoded_message
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'requestId':
self.request_id = value
elif name == 'DecodedMessage':
self.decoded_message = value
| mit |
INP-Group/ProjectN-Control | src/server/asy_server.py | 1 | 5468 | # -*- encoding: utf-8 -*-
import json
import sys
import asyncio
import asyncio.streams
def validate_package(data):
try:
result = True
assert data and isinstance(data, dict)
assert 'command' in data
assert 'data' in data
except AssertionError:
result = False
return result
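# Example envelopes (a sketch of what validate_package accepts):
#   {"command": "SUM2", "data": {"arg1": 1, "arg2": 2}}  -> True
#   {"command": "SUM2"}                                  -> False (no 'data')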
class MyServer(object):
"""
This is just an example of how a TCP server might be potentially
structured. This class has basically 3 methods: start the server,
handle a client, and stop the server.
Note that you don't have to follow this structure, it is really
just an example or possible starting point.
"""
def __init__(self):
self.server = None # encapsulates the server sockets
# this keeps track of all the clients that connected to our
# server. It can be useful in some cases, for instance to
# kill client connections or to broadcast some data to all
# clients...
self.clients = {} # task -> (reader, writer)
self._commands = {}
self._add_command_processing('SUM2', self._command_sum2)
@asyncio.coroutine
def _processing(self, data):
try:
            assert data['command'] in self._commands, \
                "Unknown command: %s" % data['command']
result = {
'ok': True,
'result': self._commands[data['command']](data['command'],
data['data'])
}
except Exception as e:
result = {'error': str(e) if e else 'empty error', 'ok': False}
return result
def _command_sum2(self, command: str, data: dict):
        assert 'arg1' in data, "Missing argument arg1"
        assert 'arg2' in data, "Missing argument arg2"
        assert isinstance(data['arg1'], (int, float)), "arg1 is not a number"
        assert isinstance(data['arg2'], (int, float)), "arg2 is not a number"
return data['arg1'] + data['arg2']
def _add_command_processing(self, command, func):
self._commands[command] = func
def _accept_client(self, client_reader, client_writer):
"""
This method accepts a new client connection and creates a Task
to handle this client. self.clients is updated to keep track
of the new client.
"""
# start a new Task to handle this specific client connection
task = asyncio.Task(self._handle_client(client_reader, client_writer))
self.clients[task] = (client_reader, client_writer)
def client_done(task):
print("client task done:", task, file=sys.stderr)
del self.clients[task]
task.add_done_callback(client_done)
@asyncio.coroutine
def _handle_client(self, client_reader, client_writer):
"""
This method actually does the work to handle the requests for
        a specific client. The protocol is line-oriented, so there is
a main loop that reads a line with a request and then sends
out one or more lines back to the client with the result.
"""
while True:
data = (yield from client_reader.readline()).decode("utf-8")
if not data: # an empty string means the client disconnected
break
msg = data.rstrip()
if msg and isinstance(msg, str):
try:
msg_data = json.loads(msg)
self._log(msg_data)
                    assert validate_package(
                        msg_data), "Invalid package from client"
result = yield from self._processing(msg_data)
except (AssertionError, ValueError) as e:
result = {
'ok': False,
'error': str(e),
}
client_writer.write("{!r}\n".format(json.dumps(result)).encode("utf-8"))
# This enables us to have flow control in our connection.
yield from client_writer.drain()
def _log(self, *args, **kwargs):
print(*args, **kwargs)
def start(self, loop):
"""
Starts the TCP server, so that it listens on port 12345.
For each client that connects, the accept_client method gets
called. This method runs the loop until the server sockets
are ready to accept connections.
"""
self._log("Server started...")
self.server = loop.run_until_complete(
asyncio.streams.start_server(self._accept_client,
'127.0.0.1', 12345,
loop=loop))
def stop(self, loop):
"""
Stops the TCP server, i.e. closes the listening socket(s).
This method runs the loop until the server sockets are closed.
"""
if self.server is not None:
self.server.close()
loop.run_until_complete(self.server.wait_closed())
self.server = None
self._log("Server stopped...")
def main():
loop = asyncio.get_event_loop()
# creates a server and starts listening to TCP connections
server = MyServer()
server.start(loop)
    # run the event loop until it is stopped or interrupted
try:
loop.run_forever()
server.stop(loop)
finally:
loop.close()
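# A matching client sketch (assumes the server above is listening on
# 127.0.0.1:12345; written in the same coroutine style as the server):
@asyncio.coroutine
def send_sum2(arg1, arg2):
    reader, writer = yield from asyncio.streams.open_connection(
        '127.0.0.1', 12345)
    request = {'command': 'SUM2', 'data': {'arg1': arg1, 'arg2': arg2}}
    writer.write((json.dumps(request) + '\n').encode('utf-8'))
    reply = yield from reader.readline()
    writer.close()
    return reply.decode('utf-8')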
if __name__ == '__main__':
main() | mit |
dracos/django | tests/admin_widgets/tests.py | 14 | 60031 | import gettext
import os
import re
from datetime import datetime, timedelta
from importlib import import_module
import pytz
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import CharField, DateField, DateTimeField
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import translation
from .models import (
Advisor, Album, Band, Bee, Car, Company, Event, Honeycomb, Individual,
Inventory, Member, MyFileField, Profile, School, Student,
)
from .widgetadmin import site as widget_admin_site
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email=None)
cls.u2 = User.objects.create_user(username='testser', password='secret')
Car.objects.create(owner=cls.superuser, make='Volkswagen', model='Passat')
Car.objects.create(owner=cls.u2, make='BMW', model='M3')
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
self.assertIsInstance(widget, widgetclass)
# Return the formfield so that other tests can continue
return ff
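    # For example (a sketch): assertFormfield(Event, 'start_date',
    # widgets.AdminDateWidget) asserts that the admin renders
    # Event.start_date with an AdminDateWidget, as the tests below do.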
def test_DateField(self):
self.assertFormfield(Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertIsNone(ff.empty_label)
def test_many_to_many(self):
self.assertFormfield(Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(Band, admin.site)
f1 = ma.formfield_for_dbfield(Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_formfield_overrides_for_datetime_field(self):
"""
        Overriding the widget for DateTimeField doesn't override the default
form_class for that field (#26449).
"""
class MemberAdmin(admin.ModelAdmin):
formfield_overrides = {DateTimeField: {'widget': widgets.AdminSplitDateTime}}
ma = MemberAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Member._meta.get_field('birthdate'), request=None)
self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime)
self.assertIsInstance(f1, forms.SplitDateTimeField)
def test_formfield_overrides_for_custom_field(self):
"""
formfield_overrides works for a custom field class.
"""
class AlbumAdmin(admin.ModelAdmin):
formfield_overrides = {MyFileField: {'widget': forms.TextInput()}}
ma = AlbumAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Album._meta.get_field('backside_art'), request=None)
self.assertIsInstance(f1.widget, forms.TextInput)
def test_field_with_choices(self):
self.assertFormfield(Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(Advisor, admin.site)
f = ma.formfield_for_dbfield(Advisor._meta.get_field('companies'), request=None)
self.assertEqual(
f.help_text,
'Hold down "Control", or "Command" on a Mac, to select more than one.'
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_nonexistent_target_id(self):
band = Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": '%s' % pk,
}
# Try posting with a nonexistent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), {"main_band": test_str})
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>'
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>'
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10" /><br />'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8" /></p>'
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10" /><br />'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8" /></p>'
)
class AdminURLWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url" />'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">'
'http://example.com</a><br />'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com" /></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
'http://example-äüö.com</a><br />'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com" /></p>'
)
def test_render_quoting(self):
"""
WARNING: This test doesn't use assertHTMLEqual since it will get rid
of some escapes which are tested here!
"""
HREF_RE = re.compile('href="([^"]+)"')
VALUE_RE = re.compile('value="([^"]+)"')
TEXT_RE = re.compile('<a[^>]+>([^>]+)</a>')
w = widgets.AdminURLFieldWidget()
output = w.render('test', 'http://example.com/<sometag>some text</sometag>')
self.assertEqual(
HREF_RE.search(output).groups()[0],
'http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output).groups()[0],
'http://example.com/<sometag>some text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output).groups()[0],
'http://example.com/<sometag>some text</sometag>',
)
output = w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>')
self.assertEqual(
HREF_RE.search(output).groups()[0],
'http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output).groups()[0],
'http://example-äüö.com/<sometag>some text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output).groups()[0],
'http://example-äüö.com/<sometag>some text</sometag>',
)
output = w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"')
self.assertEqual(
HREF_RE.search(output).groups()[0],
'http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22',
)
self.assertEqual(
TEXT_RE.search(output).groups()[0],
'http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"'
)
self.assertEqual(
VALUE_RE.search(output).groups()[0],
'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"',
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
band = Band.objects.create(name='Linkin Park')
cls.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" /> '
'<label for="test-clear_id">Clear</label></span><br />'
'Change: <input type="file" name="test" /></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test" />',
)
def test_render_required(self):
widget = widgets.AdminFileWidget()
widget.is_required = True
self.assertHTMLEqual(
widget.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br />'
'Change: <input type="file" name="test" /></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
r'albums\hybrid_theory.jpg</a></div>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art" />',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<div class="readonly"></div>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = Album._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}),
'<input type="text" name="test" value="%(bandpk)s" '
'class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
'</strong>' % {'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# ForeignKeyRawIdWidget works with fields which aren't related to
# the model's primary key.
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Apple</a></strong>' % {'pk': apple.pk}
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = Bee._meta.get_field('honeycomb').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s" />'
' <strong>%(hcomb)s</strong>'
% {'hcombpk': big_honeycomb.pk, 'hcomb': big_honeycomb}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = Individual.objects.create(name='Subject #1')
Individual.objects.create(name='Child', parent=subject1)
rel = Individual._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s" />'
' <strong>%(subj1)s</strong>'
% {'subj1pk': subject1.pk, 'subj1': subject1}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Hidden</a></strong>' % {'pk': hidden.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
m1 = Member.objects.create(name='Chester')
m2 = Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = Band._meta.get_field('members').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" />'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk, 'm2pk': m2.pk}
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk}
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = Advisor.objects.create(name='Rockstar Techie')
c1 = Company.objects.create(name='Doodle')
c2 = Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = Advisor._meta.get_field('companies').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = Individual._meta.get_field('parent').remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = Individual._meta.get_field('parent').remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = Individual._meta.get_field('soulmate').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_custom_widget_render(self):
class CustomWidget(forms.Select):
def render(self, *args, **kwargs):
return 'custom render output'
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
output = wrapper.render('name', 'value')
self.assertIn('custom render output', output)
def test_widget_delegates_value_omitted_from_data(self):
class CustomWidget(forms.Select):
def value_omitted_from_data(self, data, files, name):
return False
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.value_omitted_from_data({}, {}, 'band'), False)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminWidgetSeleniumTestCase(AdminSeleniumTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.u1 = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase):
def test_show_hide_date_time_picker_widgets(self):
"""
Pressing the ESC key or clicking on a widget value closes the date and
time picker widgets.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# First, with the date picker widget ---------------------------------
cal_icon = self.selenium.find_element_by_id('calendarlink0')
# The date picker is hidden
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
cal_icon.click()
# The date picker is visible
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The date picker is hidden again
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon, then on the 15th of current month
cal_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), '15')]").click()
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'none')
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_0').get_attribute('value'),
datetime.today().strftime('%Y-%m-') + '15',
)
# Then, with the time picker widget ----------------------------------
time_icon = self.selenium.find_element_by_id('clocklink0')
# The time picker is hidden
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
time_icon.click()
# The time picker is visible
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'block')
self.assertEqual(
[
x.text for x in
self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
],
['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
)
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The time picker is hidden again
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon, then select the 'Noon' value
time_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), 'Noon')]").click()
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'none')
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_1').get_attribute('value'),
'12:00:00',
)
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
Ensure cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
The calendar shows the date from the input field for every locale
supported by Django.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'May'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except IOError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = '{0:s} {1:d}'.format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
url = reverse('admin:admin_widgets_member_change', args=(member.pk,))
self.selenium.get(self.live_server_url + url)
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
@override_settings(TIME_ZONE='Asia/Singapore')
class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase):
def test_date_time_picker_shortcuts(self):
"""
date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
        This test case is fairly tricky: it relies on Selenium still running
        the browser in the default time zone "America/Chicago" despite
        `override_settings` changing the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
        # If we are near a DST transition, add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
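            # e.g. around a spring-forward transition in America/Chicago,
            # yesterday can be CST while tomorrow is CDT, shifting shortcut
            # values by up to an hour.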
now = datetime.now()
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector('.field-birthdate .datetimeshortcuts')
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# There is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector('.field-birthdate .timezonewarning')
# Submit the form.
self.selenium.find_element_by_tag_name('form').submit()
self.wait_page_loaded()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
# The above tests run with Asia/Singapore, which is on the positive side of
# UTC. Here we test with a timezone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumTests(DateTimePickerShortcutsSeleniumTests):
pass
class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
self.lisa = Student.objects.create(name='Lisa')
self.john = Student.objects.create(name='John')
self.bob = Student.objects.create(name='Bob')
self.peter = Student.objects.create(name='Peter')
self.jenny = Student.objects.create(name='Jenny')
self.jason = Student.objects.create(name='Jason')
self.cliff = Student.objects.create(name='Cliff')
self.arthur = Student.objects.create(name='Arthur')
self.school = School.objects.create(name='School of Awesome')
def assertActiveButtons(self, mode, field_name, choose, remove, choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
original_url = self.selenium.current_url
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
            # select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
            # select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))
# Check the title attribute is there for tool tips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
from_lisa_select_option.click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id),
])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)
])
self.assertSelectOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id),
])
self.assertSelectOptions(to_box, [
str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id),
])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.peter.id)).click()
self.get_select_option(from_box, str(self.lisa.id)).click()
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
# Unselect the options ------------------------------------------------
self.get_select_option(from_box, str(self.peter.id)).click()
self.get_select_option(from_box, str(self.lisa.id)).click()
# Choose some more options --------------------------------------------
self.get_select_option(to_box, str(self.jason.id)).click()
self.get_select_option(to_box, str(self.john.id)).click()
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Unselect the options ------------------------------------------------
self.get_select_option(to_box, str(self.jason.id)).click()
self.get_select_option(to_box, str(self.john.id)).click()
# Pressing buttons shouldn't change the URL.
self.assertEqual(self.selenium.current_url, original_url)
def test_basic(self):
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()), [self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Typing in the search box filters out options displayed in the 'from'
box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
input = self.selenium.find_element_by_id('id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# -----------------------------------------------------------------
# Choosing a filtered option sends it properly to the 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id), str(self.jason.id),
])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id),
])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Pressing enter on a filtered option sends it properly to
# the 'to' box.
self.get_select_option(to_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()), [self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element_by_link_text('Home').click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id), str(self.bob.id), str(self.cliff.id),
str(self.jason.id), str(self.jenny.id), str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Everything is still in place
self.assertSelectOptions('#id_students_from', expected_unselected_values)
self.assertSelectOptions('#id_students_to', expected_selected_values)
self.assertSelectOptions('#id_alumni_from', expected_unselected_values)
self.assertSelectOptions('#id_alumni_to', expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
        # self.selenium.refresh() or send_keys(Keys.F5) does a hard reload and
# doesn't replicate what happens when a user clicks the browser's
# 'Refresh' button.
self.selenium.execute_script("location.reload()")
self.wait_page_loaded()
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
class AdminRawIdWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
Band.objects.create(id=42, name='Bogey Blues')
Band.objects.create(id=98, name='Green Potatoes')
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_main_band').get_attribute('value'), '')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'), '')
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element_by_css_selector('.field-supporting_bands div.help').text,
'Supporting Bands.'
)
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class RelatedFieldWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_profile_add'))
main_window = self.selenium.current_window_handle
        # Click the Add User button to add a new user
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element_by_css_selector('#id_user option[value=newuser]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
profiles = Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
| bsd-3-clause |
jaggu303619/asylum | openerp/addons/auth_crypt/auth_crypt.py | 9 | 5849 | #
# Implements password encryption functions.
#
# Copyright (c) 2008, F S 3 Consulting Inc.
#
# Maintainer:
# Alec Joseph Rivera (agi<at>fs3.ph)
# refactored by Antony Lesuisse <al<at>openerp.com>
#
import hashlib
import hmac
import logging
from random import sample
from string import ascii_letters, digits
import openerp
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
magic_md5 = '$1$'
magic_sha256 = '$5$'
openerp.addons.base.res.res_users.USER_PRIVATE_FIELDS.append('password_crypt')
def gen_salt(length=8, symbols=None):
if symbols is None:
symbols = ascii_letters + digits
return ''.join(sample(symbols, length))
def md5crypt( raw_pw, salt, magic=magic_md5 ):
""" md5crypt FreeBSD crypt(3) based on but different from md5
The md5crypt is based on Mark Johnson's md5crypt.py, which in turn is
based on FreeBSD src/lib/libcrypt/crypt.c (1.2) by Poul-Henning Kamp.
Mark's port can be found in ActiveState ASPN Python Cookbook. Kudos to
Poul and Mark. -agi
Original license:
* "THE BEER-WARE LICENSE" (Revision 42):
*
* <phk@login.dknet.dk> wrote this file. As long as you retain this
* notice you can do whatever you want with this stuff. If we meet some
* day, and you think this stuff is worth it, you can buy me a beer in
* return.
*
* Poul-Henning Kamp
"""
raw_pw = raw_pw.encode('utf-8')
salt = salt.encode('utf-8')
hash = hashlib.md5()
hash.update( raw_pw + magic + salt )
st = hashlib.md5()
st.update( raw_pw + salt + raw_pw)
stretch = st.digest()
for i in range( 0, len( raw_pw ) ):
hash.update( stretch[i % 16] )
i = len( raw_pw )
while i:
if i & 1:
hash.update('\x00')
else:
hash.update( raw_pw[0] )
i >>= 1
saltedmd5 = hash.digest()
for i in range( 1000 ):
hash = hashlib.md5()
if i & 1:
hash.update( raw_pw )
else:
hash.update( saltedmd5 )
if i % 3:
hash.update( salt )
if i % 7:
hash.update( raw_pw )
if i & 1:
hash.update( saltedmd5 )
else:
hash.update( raw_pw )
saltedmd5 = hash.digest()
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):
v = ord( saltedmd5[a] ) << 16 | ord( saltedmd5[b] ) << 8 | ord( saltedmd5[c] )
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord( saltedmd5[11] )
for i in range( 2 ):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged
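# Illustrative usage of the helpers above (values are hypothetical, not part
# of the original module):
#
#   salt = gen_salt()                   # e.g. 'kR2mPx4Q'
#   crypted = md5crypt('secret', salt)  # e.g. '$1$kR2mPx4Q$<22-char hash>'
#   assert crypted.startswith(magic_md5 + salt + '$')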
def sh256crypt(cls, password, salt, magic=magic_sha256):
iterations = 1000
# see http://en.wikipedia.org/wiki/PBKDF2
result = password.encode('utf8')
    for i in xrange(iterations):
        result = hmac.HMAC(result, salt, hashlib.sha256).digest() # uses HMAC (RFC 2104) to apply salt
    result = result.encode('base64') # doesn't seem to be crypt(3) compatible
return '%s%s$%s' % (magic_sha256, salt, result)
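# Note: sh256crypt() is a salted, iterated HMAC-SHA256 loosely inspired by
# PBKDF2 (see the link above), not a crypt(3)-compatible scheme. Illustrative
# output shape (hypothetical value): '$5$<8-char salt>$<base64 digest>' --
# Python 2's base64 codec also appends a trailing newline to the digest part.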
class res_users(osv.osv):
_inherit = "res.users"
def init(self, cr):
"""Encrypt all passwords at module installation"""
cr.execute("SELECT id, password FROM res_users WHERE password IS NOT NULL and password != ''")
for user in cr.fetchall():
self._set_encrypted_password(cr, user[0], user[1])
def _set_encrypted_password(self, cr, uid, plain_password):
"""Set an encrypted password for a given user"""
salt = gen_salt()
stored_password_crypt = md5crypt(plain_password, salt)
cr.execute("UPDATE res_users SET password = '', password_crypt = %s WHERE id = %s",
(stored_password_crypt, uid))
def set_pw(self, cr, uid, id, name, value, args, context):
if value:
self._set_encrypted_password(cr, id, value)
del value
def get_pw( self, cr, uid, ids, name, args, context ):
cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
stored_pws = cr.fetchall()
res = {}
for id, stored_pw in stored_pws:
res[id] = stored_pw
return res
_columns = {
'password': fields.function(get_pw, fnct_inv=set_pw, type='char', string='Password', invisible=True, store=True),
'password_crypt': fields.char(string='Encrypted Password', invisible=True),
}
def check_credentials(self, cr, uid, password):
# convert to base_crypt if needed
        stored_password = stored_password_crypt = None
        cr.execute('SELECT password, password_crypt FROM res_users WHERE id=%s AND active', (uid,))
        if cr.rowcount:
stored_password, stored_password_crypt = cr.fetchone()
if stored_password and not stored_password_crypt:
self._set_encrypted_password(cr, uid, stored_password)
try:
return super(res_users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
            # check the stored crypted password (md5crypt or legacy sha256 scheme)
            if stored_password_crypt:
                if stored_password_crypt[:len(magic_md5)] == magic_md5:
                    salt = stored_password_crypt[len(magic_md5):11]
                    if stored_password_crypt == md5crypt(password, salt):
                        return
                elif stored_password_crypt[:len(magic_sha256)] == magic_sha256:
                    salt = stored_password_crypt[len(magic_sha256):11]
                    if stored_password_crypt == sh256crypt(None, password, salt):
                        return
# Reraise password incorrect
raise
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rupran/ansible | lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py | 12 | 8731 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_vmsnapshot
short_description: Manages VM snapshots on Apache CloudStack based clouds.
description:
- Create, remove and revert VM from snapshots.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Unique Name of the snapshot. In CloudStack terms display name.
required: true
aliases: ['display_name']
vm:
description:
- Name of the virtual machine.
required: true
description:
description:
- Description of the snapshot.
required: false
default: null
snapshot_memory:
description:
- Snapshot memory if set to true.
required: false
default: false
zone:
description:
- Name of the zone in which the VM is in. If not set, default zone is used.
required: false
default: null
project:
description:
- Name of the project the VM is assigned to.
required: false
default: null
state:
description:
- State of the snapshot.
required: false
default: 'present'
choices: [ 'present', 'absent', 'revert' ]
domain:
description:
- Domain the VM snapshot is related to.
required: false
default: null
account:
description:
- Account the VM snapshot is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a VM snapshot of disk and memory before an upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
snapshot_memory: yes
# Revert a VM to a snapshot after a failed upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: revert
# Remove a VM snapshot after successful upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the snapshot.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the snapshot.
returned: success
type: string
sample: snapshot before update
display_name:
description: Display name of the snapshot.
returned: success
type: string
sample: snapshot before update
created:
  description: Date of the snapshot.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
current:
  description: True if the snapshot is current.
returned: success
type: boolean
sample: True
state:
  description: State of the VM snapshot.
returned: success
type: string
sample: Allocated
type:
  description: Type of the VM snapshot.
returned: success
type: string
sample: DiskAndMemory
description:
  description: Description of the VM snapshot.
returned: success
type: string
sample: snapshot brought to you by Ansible
domain:
  description: Domain the VM snapshot is related to.
returned: success
type: string
sample: example domain
account:
  description: Account the VM snapshot is related to.
returned: success
type: string
sample: example account
project:
  description: Name of the project the VM snapshot is related to.
returned: success
type: string
sample: Production
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackVmSnapshot, self).__init__(module)
self.returns = {
'type': 'type',
'current': 'current',
}
def get_snapshot(self):
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
args['projectid'] = self.get_project('id')
args['name'] = self.module.params.get('name')
snapshots = self.cs.listVMSnapshot(**args)
if snapshots:
return snapshots['vmSnapshot'][0]
return None
def create_snapshot(self):
snapshot = self.get_snapshot()
if not snapshot:
self.result['changed'] = True
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['name'] = self.module.params.get('name')
args['description'] = self.module.params.get('description')
args['snapshotmemory'] = self.module.params.get('snapshot_memory')
if not self.module.check_mode:
res = self.cs.createVMSnapshot(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
snapshot = self.poll_job(res, 'vmsnapshot')
return snapshot
def remove_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
def revert_vm_to_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if snapshot['state'] != "Ready":
self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])
if not self.module.check_mode:
res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
self.module.fail_json(msg="snapshot not found, could not revert VM")
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True, aliases=['display_name']),
vm = dict(required=True),
description = dict(default=None),
zone = dict(default=None),
snapshot_memory = dict(type='bool', default=False),
state = dict(choices=['present', 'absent', 'revert'], default='present'),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
try:
acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
state = module.params.get('state')
if state in ['revert']:
snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
elif state in ['absent']:
snapshot = acs_vmsnapshot.remove_snapshot()
else:
snapshot = acs_vmsnapshot.create_snapshot()
result = acs_vmsnapshot.get_result(snapshot)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
srvg/ansible-modules-extras | packaging/language/npm.py | 32 | 8975 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chris Hoffman <christopher.hoffman@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: npm
short_description: Manage node.js packages with npm
description:
- Manage node.js packages with Node Package Manager (npm)
version_added: 1.2
author: "Chris Hoffman (@chrishoffman)"
options:
name:
description:
- The name of a node.js library to install
required: false
path:
description:
- The base path where to install the node.js libraries
required: false
version:
description:
- The version to be installed
required: false
global:
description:
- Install the node.js library globally
required: false
default: no
choices: [ "yes", "no" ]
executable:
description:
- The executable location for npm.
- This is useful if you are using a version manager, such as nvm
required: false
ignore_scripts:
description:
- Use the --ignore-scripts flag when installing.
required: false
choices: [ "yes", "no" ]
default: no
version_added: "1.8"
production:
description:
- Install dependencies in production mode, excluding devDependencies
required: false
choices: [ "yes", "no" ]
default: no
registry:
description:
- The registry to install modules from.
required: false
version_added: "1.6"
state:
description:
- The state of the node.js library
required: false
default: present
choices: [ "present", "absent", "latest" ]
'''
EXAMPLES = '''
description: Install "coffee-script" node.js package.
- npm:
name: coffee-script
path: /app/location
description: Install "coffee-script" node.js package on version 1.6.1.
- npm:
name: coffee-script
version: '1.6.1'
path: /app/location
description: Install "coffee-script" node.js package globally.
- npm:
name: coffee-script
global: yes
description: Remove the globally package "coffee-script".
- npm:
name: coffee-script
global: yes
state: absent
description: Install "coffee-script" node.js package from custom registry.
- npm:
name: coffee-script
registry: 'http://registry.mysite.com'
description: Install packages based on package.json.
- npm:
path: /app/location
description: Update packages based on package.json to their latest version.
- npm:
path: /app/location
state: latest
description: Install packages based on package.json using the npm installed with nvm v0.10.1.
- npm:
path: /app/location
executable: /opt/nvm/v0.10.1/bin/npm
state: present
'''
import os
import re
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
class Npm(object):
def __init__(self, module, **kwargs):
self.module = module
self.glbl = kwargs['glbl']
self.name = kwargs['name']
self.version = kwargs['version']
self.path = kwargs['path']
self.registry = kwargs['registry']
self.production = kwargs['production']
self.ignore_scripts = kwargs['ignore_scripts']
if kwargs['executable']:
self.executable = kwargs['executable'].split(' ')
else:
self.executable = [module.get_bin_path('npm', True)]
if kwargs['version']:
self.name_version = self.name + '@' + str(self.version)
else:
self.name_version = self.name
def _exec(self, args, run_in_check_mode=False, check_rc=True):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = self.executable + args
if self.glbl:
cmd.append('--global')
if self.production:
cmd.append('--production')
if self.ignore_scripts:
cmd.append('--ignore-scripts')
if self.name:
cmd.append(self.name_version)
if self.registry:
cmd.append('--registry')
cmd.append(self.registry)
        # If path is specified, cd into that path and run the command.
cwd = None
if self.path:
if not os.path.exists(self.path):
os.makedirs(self.path)
if not os.path.isdir(self.path):
self.module.fail_json(msg="path %s is not a directory" % self.path)
cwd = self.path
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
return out
return ''
def list(self):
cmd = ['list', '--json']
installed = list()
missing = list()
data = json.loads(self._exec(cmd, True, False))
if 'dependencies' in data:
for dep in data['dependencies']:
if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
missing.append(dep)
elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
missing.append(dep)
else:
installed.append(dep)
if self.name and self.name not in installed:
missing.append(self.name)
        # Named dependency not installed
        elif self.name:
            missing.append(self.name)
return installed, missing
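    # Illustrative shape of the `npm list --json` output parsed above (the
    # package names are hypothetical):
    #
    #   {"dependencies": {"coffee-script": {"version": "1.6.1"},
    #                     "left-pad": {"missing": true}}}
    #
    # would yield installed == ['coffee-script'] and missing == ['left-pad'].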
def install(self):
return self._exec(['install'])
def update(self):
return self._exec(['update'])
def uninstall(self):
return self._exec(['uninstall'])
def list_outdated(self):
outdated = list()
data = self._exec(['outdated'], True, False)
for dep in data.splitlines():
if dep:
# node.js v0.10.22 changed the `npm outdated` module separator
# from "@" to " ". Split on both for backwards compatibility.
                pkg, other = re.split(r'\s|@', dep, 1)
outdated.append(pkg)
return outdated
def main():
arg_spec = dict(
name=dict(default=None),
path=dict(default=None, type='path'),
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None, type='path'),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
)
arg_spec['global'] = dict(default='no', type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
glbl = module.params['global']
production = module.params['production']
executable = module.params['executable']
registry = module.params['registry']
state = module.params['state']
ignore_scripts = module.params['ignore_scripts']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \
executable=executable, registry=registry, ignore_scripts=ignore_scripts)
changed = False
if state == 'present':
installed, missing = npm.list()
if len(missing):
changed = True
npm.install()
elif state == 'latest':
installed, missing = npm.list()
outdated = npm.list_outdated()
if len(missing):
changed = True
npm.install()
if len(outdated):
changed = True
npm.update()
else: #absent
installed, missing = npm.list()
if name in installed:
changed = True
npm.uninstall()
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
lhelwerd/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/gentools.py | 214 | 6644 | #! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for supporting message and service generation for all ROS
client libraries. This is mainly responsible for calculating the
md5sums and message definitions of classes.
"""
# NOTE: this should not contain any rospy-specific code. The rospy
# generator library is rospy.genpy.
import sys
import hashlib
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . import msgs
from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin
from .msg_loader import load_depends
from .srvs import SrvSpec
from . import names
from . import base
def compute_md5_text(msg_context, spec):
"""
Compute the text used for md5 calculation. MD5 spec states that we
removes comments and non-meaningful whitespace. We also strip
packages names from type names. For convenience sake, constants are
reordered ahead of other declarations, in the order that they were
originally defined.
:returns: text for ROS MD5-processing, ``str``
"""
package = spec.package
buff = StringIO()
for c in spec.constants:
buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
msg_type = bare_msg_type(type_)
# md5 spec strips package names
if is_builtin(msg_type):
buff.write("%s %s\n"%(type_, name))
else:
# recursively generate md5 for subtype. have to build up
# dependency representation for subtype in order to
# generate md5
sub_pkg, _ = names.package_resource_name(msg_type)
sub_pkg = sub_pkg or package
sub_spec = msg_context.get_registered(msg_type)
sub_md5 = compute_md5(msg_context, sub_spec)
buff.write("%s %s\n"%(sub_md5, name))
return buff.getvalue().strip() # remove trailing new line
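# Illustrative md5 text for a hypothetical message containing one constant and
# two fields (not from a real .msg file):
#
#   int32 MAX=10
#   string name
#   geometry_msgs/Point position
#
# would produce the lines 'int32 MAX=10', 'string name' and
# '<md5 of geometry_msgs/Point> position' -- constants come first, builtins
# keep their type names, and embedded types are replaced by their own md5.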
def _compute_hash(msg_context, spec, hash):
"""
subroutine of compute_md5()
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute hash for.
:param hash: hash instance
"""
# accumulate the hash
# - root file
if isinstance(spec, MsgSpec):
hash.update(compute_md5_text(msg_context, spec).encode())
elif isinstance(spec, SrvSpec):
hash.update(compute_md5_text(msg_context, spec.request).encode())
hash.update(compute_md5_text(msg_context, spec.response).encode())
else:
raise Exception("[%s] is not a message or service"%spec)
return hash.hexdigest()
def compute_md5(msg_context, spec):
"""
Compute md5 hash for message/service
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute md5 for.
:returns: md5 hash, ``str``
"""
return _compute_hash(msg_context, spec, hashlib.md5())
## alias
compute_md5_v2 = compute_md5
def _unique_deps(dep_list):
uniques = []
for d in dep_list:
if d not in uniques:
uniques.append(d)
return uniques
def compute_full_text(msg_context, spec):
"""
Compute full text of message/service, including text of embedded
types. The text of the main msg/srv is listed first. Embedded
msg/srv files are denoted first by an 80-character '=' separator,
followed by a type declaration line,'MSG: pkg/type', followed by
the text of the embedded type.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute full text for.
:returns: concatenated text for msg/srv file and embedded msg/srv types, ``str``
"""
buff = StringIO()
sep = '='*80+'\n'
# write the text of the top-level type
buff.write(spec.text)
buff.write('\n')
# append the text of the dependencies (embedded types). Can't use set() as we have to preserve order.
for d in _unique_deps(msg_context.get_all_depends(spec.full_name)):
buff.write(sep)
buff.write("MSG: %s\n"%d)
buff.write(msg_context.get_registered(d).text)
buff.write('\n')
# #1168: remove the trailing \n separator that is added by the concatenation logic
return buff.getvalue()[:-1]
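# Illustrative layout of the concatenated text (types are hypothetical):
#
#   <text of pkg/Top>
#   ================================================================================
#   MSG: geometry_msgs/Point
#   <text of geometry_msgs/Point>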
def compute_full_type_name(package_name, file_name):
"""
Compute the full type name of message/service 'pkg/type'.
    :param package_name: name of the package the file is in, ``str``
    :param file_name: name of the msg or srv file, ``str``
:returns: typename in format 'pkg/type'
:raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension
"""
# strip extension
for ext in (base.EXT_MSG, base.EXT_SRV):
if file_name.endswith(ext):
short_name = file_name[:-len(ext)]
break
else:
raise base.MsgGenerationException("Processing file: '%s' - unknown file extension"% (file_name))
return "%s/%s"%(package_name, short_name)
| gpl-3.0 |
byterom/android_external_chromium_org | tools/telemetry/telemetry/page/actions/pinch.py | 45 | 3396 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.page.actions import page_action
class PinchAction(page_action.PageAction):
def __init__(self, selector=None, text=None, element_function=None,
left_anchor_ratio=0.5, top_anchor_ratio=0.5,
scale_factor=None, speed_in_pixels_per_second=800):
super(PinchAction, self).__init__()
self._selector = selector
self._text = text
self._element_function = element_function
self._left_anchor_ratio = left_anchor_ratio
self._top_anchor_ratio = top_anchor_ratio
self._scale_factor = scale_factor
self._speed = speed_in_pixels_per_second
if (self._selector is None and self._text is None and
self._element_function is None):
self._element_function = 'document.body'
def WillRunAction(self, tab):
for js_file in ['gesture_common.js', 'pinch.js']:
with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
js = f.read()
tab.ExecuteJavaScript(js)
# Fail if browser doesn't support synthetic pinch gestures.
if not tab.EvaluateJavaScript('window.__PinchAction_SupportedByBrowser()'):
raise page_action.PageActionNotSupported(
'Synthetic pinch not supported for this browser')
# TODO(dominikg): Remove once JS interface changes have rolled into stable.
if not tab.EvaluateJavaScript('chrome.gpuBenchmarking.newPinchInterface'):
raise page_action.PageActionNotSupported(
'This version of the browser doesn\'t support the new JS interface '
'for pinch gestures.')
if (page_action.GetGestureSourceTypeFromOptions(tab) ==
'chrome.gpuBenchmarking.MOUSE_INPUT'):
raise page_action.PageActionNotSupported(
'Pinch page action does not support mouse input')
if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
raise page_action.PageActionNotSupported(
'Touch input not supported for this browser')
done_callback = 'function() { window.__pinchActionDone = true; }'
tab.ExecuteJavaScript("""
window.__pinchActionDone = false;
window.__pinchAction = new __PinchAction(%s);"""
% done_callback)
@staticmethod
def _GetDefaultScaleFactorForPage(tab):
current_scale_factor = tab.EvaluateJavaScript(
'window.outerWidth / window.innerWidth')
return 3.0 / current_scale_factor
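  # Worked example (illustrative): if the page is already scaled so that
  # window.outerWidth / window.innerWidth == 1.5, this returns 3.0 / 1.5 == 2.0,
  # so pinching by that factor lands on a total page scale of 3.0.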
def RunAction(self, tab):
scale_factor = (self._scale_factor if self._scale_factor else
PinchAction._GetDefaultScaleFactorForPage(tab))
code = '''
function(element, info) {
if (!element) {
throw Error('Cannot find element: ' + info);
}
window.__pinchAction.start({
element: element,
left_anchor_ratio: %s,
top_anchor_ratio: %s,
scale_factor: %s,
speed: %s
});
}''' % (self._left_anchor_ratio,
self._top_anchor_ratio,
scale_factor,
self._speed)
page_action.EvaluateCallbackWithElement(
tab, code, selector=self._selector, text=self._text,
element_function=self._element_function)
tab.WaitForJavaScriptExpression('window.__pinchActionDone', 60)
| bsd-3-clause |
alilotfi/django | tests/template_tests/filter_tests/test_ljust.py | 521 | 1081 | from django.template.defaultfilters import ljust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LjustTests(SimpleTestCase):
@setup({'ljust01': '{% autoescape off %}.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.{% endautoescape %}'})
def test_ljust01(self):
output = self.engine.render_to_string('ljust01', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ".a&b . .a&b .")
@setup({'ljust02': '.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.'})
def test_ljust02(self):
output = self.engine.render_to_string('ljust02', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ".a&b . .a&b .")
class FunctionTests(SimpleTestCase):
def test_ljust(self):
        self.assertEqual(ljust('test', 10), 'test      ')
self.assertEqual(ljust('test', 3), 'test')
def test_less_than_string_length(self):
self.assertEqual(ljust('test', 3), 'test')
def test_non_string_input(self):
self.assertEqual(ljust(123, 4), '123 ')
| bsd-3-clause |
qwefi/nova | nova/api/openstack/compute/plugins/v3/evacuate.py | 3 | 3575 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import utils
LOG = logging.getLogger(__name__)
ALIAS = "os-evacuate"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class EvacuateController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(EvacuateController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('evacuate')
def _evacuate(self, req, id, body):
"""
Permit admins to evacuate a server from a failed host
to a new one.
"""
context = req.environ["nova.context"]
authorize(context)
try:
if len(body) != 1:
raise exc.HTTPBadRequest(_("Malformed request body"))
evacuate_body = body["evacuate"]
host = evacuate_body["host"]
on_shared_storage = strutils.bool_from_string(
evacuate_body["on_shared_storage"])
password = None
if 'admin_password' in evacuate_body:
                # An admin password may not be specified when evacuating a
                # server on shared storage.
if on_shared_storage:
msg = _("admin password can't be changed on existing disk")
raise exc.HTTPBadRequest(explanation=msg)
password = evacuate_body['admin_password']
elif not on_shared_storage:
password = utils.generate_password()
except (TypeError, KeyError):
msg = _("host and on_shared_storage must be specified.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(context, id)
self.compute_api.evacuate(context, instance, host,
on_shared_storage, password)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate')
except Exception as e:
msg = _("Error in evacuate, %s") % e
LOG.exception(msg, instance=instance)
raise exc.HTTPBadRequest(explanation=msg)
return {'admin_password': password}
class Evacuate(extensions.V3APIExtensionBase):
"""Enables server evacuation."""
name = "Evacuate"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v3"
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = EvacuateController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 |
2ndQuadrant/ansible | test/runner/lib/classification.py | 4 | 31029 | """Classify changes in Ansible code."""
from __future__ import absolute_import, print_function
import collections
import os
import re
import time
from lib.target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from lib.util import (
display,
)
from lib.import_analysis import (
get_python_module_utils_imports,
)
from lib.csharp_import_analysis import (
get_csharp_module_utils_imports,
)
from lib.powershell_import_analysis import (
get_powershell_module_utils_imports,
)
from lib.config import (
TestConfig,
IntegrationConfig,
)
from lib.metadata import (
ChangeDescription,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None):
"""
:type args: TestConfig
:type paths: list[str]
:type verbose_command: str
:rtype: ChangeDescription
"""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command in commands:
commands[command].discard('none')
if any(t == 'all' for t in commands[command]):
commands[command] = set(['all'])
commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
for command in commands:
if commands[command] == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
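# Hedged usage sketch (not part of the original module): 'args' must be a real
# TestConfig; the changed path and the printed mapping below are hypothetical.
#
#   changes = categorize_changes(args, ['lib/ansible/modules/files/copy.py'])
#   print(changes.regular_command_targets)
#   # e.g. {'integration': ['copy'], 'units': ['copy']}   (illustrative only)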
class PathMapper(object):
"""Map file paths to test commands and targets."""
def __init__(self, args):
"""
:type args: TestConfig
"""
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [t for t in self.sanity_targets if os.path.splitext(t.path)[1] == '.ps1']
self.csharp_targets = [t for t in self.sanity_targets if os.path.splitext(t.path)[1] == '.cs']
self.units_modules = set(t.module for t in self.units_targets if t.module)
self.units_paths = set(a for t in self.units_targets for a in t.aliases)
self.sanity_paths = set(t.path for t in self.sanity_targets)
self.module_names_by_path = dict((t.path, t.module) for t in self.module_targets)
self.integration_targets_by_name = dict((t.name, t) for t in self.integration_targets)
self.integration_targets_by_alias = dict((a, t) for t in self.integration_targets for a in t.aliases)
self.posix_integration_by_module = dict((m, t.name) for t in self.integration_targets
if 'posix/' in t.aliases for m in t.modules)
self.windows_integration_by_module = dict((m, t.name) for t in self.integration_targets
if 'windows/' in t.aliases for m in t.modules)
self.network_integration_by_module = dict((m, t.name) for t in self.integration_targets
if 'network/' in t.aliases for m in t.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets = {}
for target in self.integration_targets:
for path in target.needs_file:
if path not in self.paths_to_dependent_targets:
self.paths_to_dependent_targets[path] = set()
self.paths_to_dependent_targets[path].add(target)
def get_dependent_paths(self, path):
"""
:type path: str
:rtype: list[str]
"""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
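    # Minimal standalone sketch (not part of the original class) of the
    # worklist loop above: keep expanding newly discovered paths until a
    # fixed point is reached, so indirect dependents are also returned.
    #
    #   def transitive(start, edges):
    #       seen, frontier = set(), set(edges.get(start, ()))
    #       while frontier:
    #           seen |= frontier
    #           frontier = set(d for p in frontier
    #                          for d in edges.get(p, ()) if d not in seen)
    #       return sorted(seen)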
def get_dependent_paths_non_recursive(self, path):
"""
:type path: str
:rtype: list[str]
"""
paths = self.get_dependent_paths_internal(path)
paths += [t.path + '/' for t in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path):
"""
:type path: str
:rtype: list[str]
"""
ext = os.path.splitext(os.path.split(path)[1])[1]
if path.startswith('lib/ansible/module_utils/'):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if path.startswith('test/integration/targets/'):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if path == 'lib/ansible/module_utils/__init__.py':
return []
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = os.path.splitext(path)[0].replace('/', '.')[4:]
if name.endswith('.__init__'):
name = name[:-9]
return sorted(self.python_module_utils_imports[name])
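    # Illustrative derivation (not in the original): how the dotted name above
    # is built; the [4:] slice strips the leading 'lib.' left by replace().
    #
    #   >>> os.path.splitext('lib/ansible/module_utils/basic.py')[0].replace('/', '.')[4:]
    #   'ansible.module_utils.basic'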
def get_powershell_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = os.path.splitext(os.path.basename(path))[0]
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = os.path.splitext(os.path.basename(path))[0]
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
target_name = path.split('/')[3]
        dependents = ['test/integration/targets/%s/' % target
                      for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('.github/'):
return minimal
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('contrib/'):
return {
'units': 'test/units/contrib/'
}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if path.startswith('examples/'):
if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
return {
'windows-integration': 'connection_winrm',
}
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/modules/'):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: True,
}
return minimal
if path.startswith('lib/ansible/module_utils/'):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if path.startswith('lib/ansible/plugins/action/'):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if path.startswith('lib/ansible/plugins/connection/'):
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': 'test/units/plugins/connection/',
}
units_path = 'test/units/plugins/connection/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if path.startswith('lib/ansible/plugins/inventory/'):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_path = 'test/units/plugins/inventory/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target is not None,
}
if (path.startswith('lib/ansible/plugins/terminal/') or
path.startswith('lib/ansible/plugins/cliconf/') or
path.startswith('lib/ansible/plugins/netconf/')):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if path.startswith('lib/ansible/plugins/doc_fragments/'):
return {
'sanity': 'all',
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('packaging/'):
if path.startswith('packaging/requirements/'):
if name.startswith('requirements-') and ext == '.txt':
component = name.split('-', 1)[1]
candidates = (
'cloud/%s/' % component,
)
for candidate in candidates:
if candidate in self.integration_targets_by_alias:
return {
'integration': candidate,
}
return all_tests(self.args) # broad impact, run all tests
return minimal
if path.startswith('test/cache/'):
return minimal
if path.startswith('test/results/'):
return minimal
if path.startswith('test/legacy/'):
return minimal
if path.startswith('test/env/'):
return minimal
if path.startswith('test/integration/roles/'):
return minimal
if path.startswith('test/integration/targets/'):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name[path.split('/')[3]]
if 'hidden/' in target.aliases:
if target.type == 'role':
return minimal # already expanded using get_dependent_paths
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: True,
}
if path.startswith('test/integration/'):
if dirname == 'test/integration':
if self.prefixes.get(name) == 'network' and ext == '.yaml':
return minimal # network integration test playbooks are not used by ansible-test
if filename == 'network-all.yaml':
return minimal # network integration test playbook not used by ansible-test
if filename == 'platform_agnostic.yaml':
return minimal # network integration test playbook not used by ansible-test
if filename.startswith('inventory.') and filename.endswith('.template'):
return minimal # ansible-test does not use these inventory templates
if filename == 'inventory':
return {
'integration': self.integration_all_target,
}
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name.startswith('cloud-config-'):
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if path.startswith('test/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/units/'):
if path in self.units_paths:
return {
'units': path,
}
if path.startswith('test/units/compat/'):
return {
'units': 'test/units/',
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if path.startswith('test/runner/completion/'):
if path == 'test/runner/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/runner/lib/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/runner/lib/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/runner/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if name.startswith('integration.cloud.'):
cloud_target = 'cloud/%s/' % name.split('.')[2]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/runner/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/shippable/tools/'):
return minimal # not used by tests
if path.startswith('test/utils/shippable/'):
if dirname == 'test/utils/shippable':
test_map = {
'cloud.sh': 'integration:cloud/',
'freebsd.sh': 'integration:all',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'osx.sh': 'integration:all',
'rhel.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/'):
return minimal
if path == 'test/README.md':
return minimal
if path.startswith('ticket_stubs/'):
return minimal
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.gitmodules',
'.mailmap',
'tox.ini', # obsolete
'COPYING',
'VERSION',
'Makefile',
):
return minimal
if path in (
'shippable.yml',
'.coveragerc',
):
return all_tests(self.args) # test infrastructure, run all tests
if path == 'setup.py':
return all_tests(self.args) # broad impact, run all tests
if path == '.yamllint':
return {
'sanity': 'all',
}
if ext in ('.md', '.rst', '.txt', '.xml', '.in'):
return minimal
return None # unknown, will result in fall-back to run all tests
def all_tests(args, force=False):
"""
:type args: TestConfig
:type force: bool
:rtype: dict[str, str]
"""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args):
"""
:type args: TestConfig
:rtype: str
"""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'
| gpl-3.0 |
kerr-huang/SL4A | python/src/Lib/test/test_zipimport_support.py | 53 | 9952 | # This test module covers support in various parts of the standard library
# for working with modules located inside zipfiles
# The tests are centralised in this fashion to make it easy to drop them
# if a platform doesn't support zipimport
import unittest
import test.test_support
import os
import os.path
import sys
import textwrap
import zipfile
import zipimport
import doctest
import inspect
import linecache
import pdb
verbose = test.test_support.verbose
# Library modules covered by this test set
# pdb (Issue 4201)
# inspect (Issue 4223)
# doctest (Issue 4197)
# Other test modules with zipimport related tests
# test_zipimport (of course!)
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
from test import test_doctest, sample_doctest
from test.test_importhooks import ImportHooksBaseTestCase
from test.test_cmd_line_script import temp_dir, _run_python, \
_spawn_python, _kill_python, \
_make_test_script, \
_compile_test_script, \
_make_test_zip, _make_test_pkg
def _run_object_doctest(obj, module):
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = test.test_support.get_original_stdout()
try:
finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
runner = doctest.DocTestRunner(verbose=verbose)
# Use the object's fully qualified name if it has one
# Otherwise, use the module's name
try:
name = "%s.%s" % (obj.__module__, obj.__name__)
except AttributeError:
name = module.__name__
for example in finder.find(obj, name, module):
runner.run(example)
f, t = runner.failures, runner.tries
if f:
raise test.test_support.TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
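# Hedged usage sketch (not in the original): how the helper above is used
# later in this file -- run the doctests attached to a single object from an
# already-imported module, raising TestFailed on any doctest failure.
#
#   _run_object_doctest(test_zipped_doctest.sample_func, test_zipped_doctest)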
class ZipSupportTests(ImportHooksBaseTestCase):
# We use the ImportHooksBaseTestCase to restore
# the state of the import related information
# in the sys module after each test
# We also clear the linecache and zipimport cache
# just to avoid any bogus errors due to name reuse in the tests
def setUp(self):
linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
def test_inspect_getsource_issue4223(self):
test_src = "def foo(): pass\n"
with temp_dir() as d:
init_name = _make_test_script(d, '__init__', test_src)
name_in_zip = os.path.join('zip_pkg',
os.path.basename(init_name))
zip_name, run_name = _make_test_zip(d, 'test_zip',
init_name, name_in_zip)
os.remove(init_name)
sys.path.insert(0, zip_name)
import zip_pkg
self.assertEqual(inspect.getsource(zip_pkg.foo), test_src)
def test_doctest_issue4197(self):
# To avoid having to keep two copies of the doctest module's
# unit tests in sync, this test works by taking the source of
# test_doctest itself, rewriting it a bit to cope with a new
# location, and then throwing it in a zip file to make sure
# everything still works correctly
test_src = inspect.getsource(test_doctest)
test_src = test_src.replace(
"from test import test_doctest",
"import test_zipped_doctest as test_doctest")
test_src = test_src.replace("test.test_doctest",
"test_zipped_doctest")
test_src = test_src.replace("test.sample_doctest",
"sample_zipped_doctest")
sample_src = inspect.getsource(sample_doctest)
sample_src = sample_src.replace("test.test_doctest",
"test_zipped_doctest")
with temp_dir() as d:
script_name = _make_test_script(d, 'test_zipped_doctest',
test_src)
zip_name, run_name = _make_test_zip(d, 'test_zip',
script_name)
z = zipfile.ZipFile(zip_name, 'a')
z.writestr("sample_zipped_doctest.py", sample_src)
z.close()
if verbose:
zip_file = zipfile.ZipFile(zip_name, 'r')
print 'Contents of %r:' % zip_name
zip_file.printdir()
zip_file.close()
os.remove(script_name)
sys.path.insert(0, zip_name)
import test_zipped_doctest
# Some of the doc tests depend on the colocated text files
# which aren't available to the zipped version (the doctest
# module currently requires real filenames for non-embedded
# tests). So we're forced to be selective about which tests
# to run.
# doctest could really use some APIs which take a text
# string or a file object instead of a filename...
known_good_tests = [
test_zipped_doctest.SampleClass,
test_zipped_doctest.SampleClass.NestedClass,
test_zipped_doctest.SampleClass.NestedClass.__init__,
test_zipped_doctest.SampleClass.__init__,
test_zipped_doctest.SampleClass.a_classmethod,
test_zipped_doctest.SampleClass.a_property,
test_zipped_doctest.SampleClass.a_staticmethod,
test_zipped_doctest.SampleClass.double,
test_zipped_doctest.SampleClass.get,
test_zipped_doctest.SampleNewStyleClass,
test_zipped_doctest.SampleNewStyleClass.__init__,
test_zipped_doctest.SampleNewStyleClass.double,
test_zipped_doctest.SampleNewStyleClass.get,
test_zipped_doctest.old_test1,
test_zipped_doctest.old_test2,
test_zipped_doctest.old_test3,
test_zipped_doctest.old_test4,
test_zipped_doctest.sample_func,
test_zipped_doctest.test_DocTest,
test_zipped_doctest.test_DocTestParser,
test_zipped_doctest.test_DocTestRunner.basics,
test_zipped_doctest.test_DocTestRunner.exceptions,
test_zipped_doctest.test_DocTestRunner.option_directives,
test_zipped_doctest.test_DocTestRunner.optionflags,
test_zipped_doctest.test_DocTestRunner.verbose_flag,
test_zipped_doctest.test_Example,
test_zipped_doctest.test_debug,
test_zipped_doctest.test_pdb_set_trace,
test_zipped_doctest.test_pdb_set_trace_nested,
test_zipped_doctest.test_testsource,
test_zipped_doctest.test_trailing_space_in_test,
test_zipped_doctest.test_DocTestSuite,
test_zipped_doctest.test_DocTestFinder,
]
# These remaining tests are the ones which need access
# to the data files, so we don't run them
fail_due_to_missing_data_files = [
test_zipped_doctest.test_DocFileSuite,
test_zipped_doctest.test_testfile,
test_zipped_doctest.test_unittest_reportflags,
]
for obj in known_good_tests:
_run_object_doctest(obj, test_zipped_doctest)
def test_doctest_main_issue4197(self):
test_src = textwrap.dedent("""\
class Test:
">>> 'line 2'"
pass
import doctest
doctest.testmod()
""")
pattern = 'File "%s", line 2, in %s'
with temp_dir() as d:
script_name = _make_test_script(d, 'script', test_src)
exit_code, data = _run_python(script_name)
expected = pattern % (script_name, "__main__.Test")
if verbose:
print "Expected line", expected
print "Got stdout:"
print data
self.assert_(expected in data)
zip_name, run_name = _make_test_zip(d, "test_zip",
script_name, '__main__.py')
exit_code, data = _run_python(zip_name)
expected = pattern % (run_name, "__main__.Test")
if verbose:
print "Expected line", expected
print "Got stdout:"
print data
self.assert_(expected in data)
def test_pdb_issue4201(self):
test_src = textwrap.dedent("""\
def f():
pass
import pdb
pdb.runcall(f)
""")
with temp_dir() as d:
script_name = _make_test_script(d, 'script', test_src)
p = _spawn_python(script_name)
p.stdin.write('l\n')
data = _kill_python(p)
self.assert_(script_name in data)
zip_name, run_name = _make_test_zip(d, "test_zip",
script_name, '__main__.py')
p = _spawn_python(zip_name)
p.stdin.write('l\n')
data = _kill_python(p)
self.assert_(run_name in data)
def test_main():
test.test_support.run_unittest(ZipSupportTests)
test.test_support.reap_children()
if __name__ == '__main__':
test_main()
| apache-2.0 |
weolar/miniblink49 | third_party/WebKit/Source/bindings/scripts/v8_utilities.py | 6 | 21452 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions shared by various parts of the code generator.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import re
from idl_types import IdlTypeBase
import idl_types
from idl_definitions import Exposure, IdlInterface
from v8_globals import includes
ACRONYMS = [
'CSSOM', # must come *before* CSS to match full acronym
'CSS',
'HTML',
'IME',
'JS',
'SVG',
'URL',
'WOFF',
'XML',
'XSLT',
]
################################################################################
# Extended attribute parsing
################################################################################
def extended_attribute_value_contains(extended_attribute_value, key):
return (extended_attribute_value == key or
(isinstance(extended_attribute_value, list) and
key in extended_attribute_value))
def has_extended_attribute(definition_or_member, extended_attribute_list):
return any(extended_attribute in definition_or_member.extended_attributes
for extended_attribute in extended_attribute_list)
def has_extended_attribute_value(definition_or_member, name, value):
extended_attributes = definition_or_member.extended_attributes
return (name in extended_attributes and
extended_attribute_value_contains(extended_attributes[name], value))
def extended_attribute_value_as_list(definition_or_member, name):
extended_attributes = definition_or_member.extended_attributes
if name not in extended_attributes:
return None
value = extended_attributes[name]
if isinstance(value, list):
return value
return [value]
################################################################################
# String handling
################################################################################
def capitalize(name):
"""Capitalize first letter or initial acronym (used in setter names)."""
for acronym in ACRONYMS:
if name.startswith(acronym.lower()):
return name.replace(acronym.lower(), acronym)
return name[0].upper() + name[1:]
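# Illustrative examples (not in the original): because of the ACRONYMS table
# above, a leading acronym is promoted as a whole, not letter by letter.
#
#   >>> capitalize('cssText')
#   'CSSText'
#   >>> capitalize('url')
#   'URL'
#   >>> capitalize('name')
#   'Name'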
def strip_suffix(string, suffix):
if not suffix or not string.endswith(suffix):
return string
return string[:-len(suffix)]
def uncapitalize(name):
"""Uncapitalizes first letter or initial acronym (used in method names).
E.g., 'SetURL' becomes 'setURL', but 'URLFoo' becomes 'urlFoo'.
"""
for acronym in ACRONYMS:
if name.startswith(acronym):
return name.replace(acronym, acronym.lower())
return name[0].lower() + name[1:]
################################################################################
# C++
################################################################################
def scoped_name(interface, definition, base_name):
if 'ImplementedInPrivateScript' in definition.extended_attributes:
return '%s::PrivateScript::%s' % (v8_class_name(interface), base_name)
# partial interfaces are implemented as separate classes, with their members
# implemented as static member functions
partial_interface_implemented_as = definition.extended_attributes.get('PartialInterfaceImplementedAs')
if partial_interface_implemented_as:
return '%s::%s' % (partial_interface_implemented_as, base_name)
if (definition.is_static or
definition.name in ('Constructor', 'NamedConstructor')):
return '%s::%s' % (cpp_name(interface), base_name)
return 'impl->%s' % base_name
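# Illustrative resolutions (not in the original; interface and member names
# are hypothetical):
#
#   [ImplementedInPrivateScript] member 'bar' of Document    -> 'V8Document::PrivateScript::bar'
#   [PartialInterfaceImplementedAs=DocumentFoo] member 'bar' -> 'DocumentFoo::bar'
#   static or constructor member 'bar' of Document           -> 'Document::bar'
#   ordinary member 'bar'                                    -> 'impl->bar'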
def v8_class_name(interface):
return 'V8' + interface.name
def v8_class_name_or_partial(interface):
class_name = v8_class_name(interface)
if interface.is_partial:
return ''.join([class_name, 'Partial'])
return class_name
################################################################################
# Specific extended attributes
################################################################################
# [ActivityLogging]
def activity_logging_world_list(member, access_type=''):
"""Returns a set of world suffixes for which a definition member has activity logging, for specified access type.
access_type can be 'Getter' or 'Setter' if only checking getting or setting.
"""
extended_attributes = member.extended_attributes
if 'LogActivity' not in extended_attributes:
return set()
log_activity = extended_attributes['LogActivity']
if log_activity and not log_activity.startswith(access_type):
return set()
includes.add('bindings/core/v8/V8DOMActivityLogger.h')
if 'LogAllWorlds' in extended_attributes:
return set(['', 'ForMainWorld'])
return set(['']) # At minimum, include isolated worlds.
# [ActivityLogging]
def activity_logging_world_check(member):
"""Returns if an isolated world check is required when generating activity
logging code.
The check is required when there is no per-world binding code and logging is
required only for isolated world.
"""
extended_attributes = member.extended_attributes
if 'LogActivity' not in extended_attributes:
return False
if ('PerWorldBindings' not in extended_attributes and
'LogAllWorlds' not in extended_attributes):
return True
return False
# [CallWith]
CALL_WITH_ARGUMENTS = {
'ScriptState': 'scriptState',
'ExecutionContext': 'executionContext',
'ScriptArguments': 'scriptArguments.release()',
'ActiveWindow': 'callingDOMWindow(info.GetIsolate())',
'FirstWindow': 'enteredDOMWindow(info.GetIsolate())',
'Document': 'document',
'ThisValue': 'ScriptValue(scriptState, info.This())',
}
# List because key order matters, as we want arguments in deterministic order
CALL_WITH_VALUES = [
'ScriptState',
'ExecutionContext',
'ScriptArguments',
'ActiveWindow',
'FirstWindow',
'Document',
'ThisValue',
]
def call_with_arguments(call_with_values):
if not call_with_values:
return []
return [CALL_WITH_ARGUMENTS[value]
for value in CALL_WITH_VALUES
if extended_attribute_value_contains(call_with_values, value)]
# [Conditional]
DELIMITER_TO_OPERATOR = {
'|': '||',
',': '&&',
}
def conditional_string(definition_or_member):
extended_attributes = definition_or_member.extended_attributes
if 'Conditional' not in extended_attributes:
return None
return 'ENABLE(%s)' % extended_attributes['Conditional']
# [Constructor], [NamedConstructor]
def is_constructor_attribute(member):
# TODO(yukishiino): replace this with [Constructor] and [NamedConstructor] extended attribute
return member.idl_type.name.endswith('Constructor')
# [DeprecateAs]
def deprecate_as(member):
extended_attributes = member.extended_attributes
if 'DeprecateAs' not in extended_attributes:
return None
includes.add('core/frame/UseCounter.h')
return extended_attributes['DeprecateAs']
# [Exposed]
EXPOSED_EXECUTION_CONTEXT_METHOD = {
'CompositorWorker': 'isCompositorWorkerGlobalScope',
'DedicatedWorker': 'isDedicatedWorkerGlobalScope',
'ServiceWorker': 'isServiceWorkerGlobalScope',
'SharedWorker': 'isSharedWorkerGlobalScope',
'Window': 'isDocument',
'Worker': 'isWorkerGlobalScope',
}
EXPOSED_WORKERS = set([
'CompositorWorker',
'DedicatedWorker',
'SharedWorker',
'ServiceWorker',
])
class ExposureSet:
"""An ExposureSet is a collection of Exposure instructions."""
def __init__(self, exposures=None):
self.exposures = set(exposures) if exposures else set()
def issubset(self, other):
"""Returns true if |self|'s exposure set is a subset of
|other|'s exposure set. This function doesn't care about
RuntimeEnabled."""
self_set = self._extended(set(e.exposed for e in self.exposures))
other_set = self._extended(set(e.exposed for e in other.exposures))
return self_set.issubset(other_set)
@staticmethod
def _extended(target):
if EXPOSED_WORKERS.issubset(target):
return target | set(['Worker'])
elif 'Worker' in target:
return target | EXPOSED_WORKERS
return target
def add(self, exposure):
self.exposures.add(exposure)
def __len__(self):
return len(self.exposures)
def __iter__(self):
return self.exposures.__iter__()
@staticmethod
def _code(exposure):
exposed = ('context->%s()' %
EXPOSED_EXECUTION_CONTEXT_METHOD[exposure.exposed])
if exposure.runtime_enabled is not None:
runtime_enabled = ('RuntimeEnabledFeatures::%sEnabled()' %
uncapitalize(exposure.runtime_enabled))
return '({0} && {1})'.format(exposed, runtime_enabled)
return exposed
def code(self):
if len(self.exposures) == 0:
return None
# We use sorted here to deflake output.
return ' || '.join(sorted(self._code(e) for e in self.exposures))
def exposed(member, interface):
"""Returns a C++ code that checks if a method/attribute/etc is exposed.
When the Exposed attribute contains RuntimeEnabledFeatures (i.e.
Exposed(Arguments) form is given), the code contains check for them as
well.
EXAMPLE: [Exposed=Window, RuntimeEnabledFeature=Feature1]
=> context->isDocument()
EXAMPLE: [Exposed(Window Feature1, Window Feature2)]
=> context->isDocument() && RuntimeEnabledFeatures::feature1Enabled() ||
context->isDocument() && RuntimeEnabledFeatures::feature2Enabled()
"""
exposure_set = ExposureSet(
extended_attribute_value_as_list(member, 'Exposed'))
interface_exposure_set = ExposureSet(
extended_attribute_value_as_list(interface, 'Exposed'))
for e in exposure_set:
if e.exposed not in EXPOSED_EXECUTION_CONTEXT_METHOD:
raise ValueError('Invalid execution context: %s' % e.exposed)
# Methods must not be exposed to a broader scope than their interface.
if not exposure_set.issubset(interface_exposure_set):
raise ValueError('Interface members\' exposure sets must be a subset of the interface\'s.')
return exposure_set.code()
# [GarbageCollected], [WillBeGarbageCollected]
def gc_type(definition):
extended_attributes = definition.extended_attributes
if 'GarbageCollected' in extended_attributes:
return 'GarbageCollectedObject'
elif 'WillBeGarbageCollected' in extended_attributes:
return 'WillBeGarbageCollectedObject'
return 'RefCountedObject'
# [ImplementedAs]
def cpp_name(definition_or_member):
extended_attributes = definition_or_member.extended_attributes
if 'ImplementedAs' not in extended_attributes:
return definition_or_member.name
return extended_attributes['ImplementedAs']
def cpp_name_from_interfaces_info(name, interfaces_info):
return interfaces_info.get(name, {}).get('implemented_as') or name
def cpp_name_or_partial(interface):
cpp_class_name = cpp_name(interface)
if interface.is_partial:
return ''.join([cpp_class_name, 'Partial'])
return cpp_class_name
# [MeasureAs]
def measure_as(definition_or_member, interface):
extended_attributes = definition_or_member.extended_attributes
if 'MeasureAs' in extended_attributes:
includes.add('core/frame/UseCounter.h')
return lambda suffix: extended_attributes['MeasureAs']
if 'Measure' in extended_attributes:
includes.add('core/frame/UseCounter.h')
measure_as_name = capitalize(definition_or_member.name)
if interface is not None:
measure_as_name = '%s_%s' % (capitalize(interface.name), measure_as_name)
return lambda suffix: 'V8%s_%s' % (measure_as_name, suffix)
return None
# [RuntimeEnabled]
def runtime_enabled_function_name(definition_or_member):
"""Returns the name of the RuntimeEnabledFeatures function.
The returned function checks if a method/attribute is enabled.
Given extended attribute RuntimeEnabled=FeatureName, return:
RuntimeEnabledFeatures::{featureName}Enabled
"""
extended_attributes = definition_or_member.extended_attributes
if 'RuntimeEnabled' not in extended_attributes:
return None
feature_name = extended_attributes['RuntimeEnabled']
return 'RuntimeEnabledFeatures::%sEnabled' % uncapitalize(feature_name)
# [Unforgeable]
def is_unforgeable(interface, member):
return (('Unforgeable' in interface.extended_attributes or
'Unforgeable' in member.extended_attributes) and
not member.is_static)
# [TypeChecking=Interface] / [LegacyInterfaceTypeChecking]
def is_legacy_interface_type_checking(interface, member):
if not ('TypeChecking' in interface.extended_attributes or
'TypeChecking' in member.extended_attributes):
return True
if 'LegacyInterfaceTypeChecking' in member.extended_attributes:
return True
return False
# [Unforgeable], [Global], [PrimaryGlobal] and [DoNotExposeJSAccessors]
def on_instance(interface, member):
"""Returns True if the interface's member needs to be defined on every
instance object.
    The following members must be defined on an instance object.
- [Unforgeable] members
- regular members of [Global] or [PrimaryGlobal] interfaces
- members on which [DoNotExposeJSAccessors] is specified
"""
# TODO(yukishiino): Implement this function following the spec.
return not on_prototype(interface, member)
# [ExposeJSAccessors]
def on_prototype(interface, member):
"""Returns True if the interface's member needs to be defined on the
prototype object.
Most members are defined on the prototype object. Exceptions are as
follows.
- constant members
- static members (optional)
- [Unforgeable] members
- members of [Global] or [PrimaryGlobal] interfaces
- named properties of [Global] or [PrimaryGlobal] interfaces
However, if [ExposeJSAccessors] is specified, the member is defined on the
prototype object.
"""
# TODO(yukishiino): Implement this function following the spec.
if ('ExposeJSAccessors' in interface.extended_attributes and
'DoNotExposeJSAccessors' in interface.extended_attributes):
raise Exception('Both of ExposeJSAccessors and DoNotExposeJSAccessors are specified at a time in an interface: ' + interface.name)
if ('ExposeJSAccessors' in member.extended_attributes and
'DoNotExposeJSAccessors' in member.extended_attributes):
raise Exception('Both of ExposeJSAccessors and DoNotExposeJSAccessors are specified at a time on a member: ' + member.name + ' in an interface: ' + interface.name)
# Note that ExposeJSAccessors and DoNotExposeJSAccessors are more powerful
# than 'static', [Unforgeable] and [OverrideBuiltins].
if 'ExposeJSAccessors' in member.extended_attributes:
return True
if 'DoNotExposeJSAccessors' in member.extended_attributes:
return False
# These members must not be placed on prototype chains.
if (is_constructor_attribute(member) or
member.is_static or
is_unforgeable(interface, member) or
'OverrideBuiltins' in interface.extended_attributes):
return False
# TODO(yukishiino): We should handle [Global] and [PrimaryGlobal] instead of
# Window.
if (interface.name == 'Window'):
return member.idl_type.name == 'EventHandler'
if 'ExposeJSAccessors' in interface.extended_attributes:
return True
if 'DoNotExposeJSAccessors' in interface.extended_attributes:
return False
return True
# static, const
def on_interface(interface, member):
"""Returns True if the interface's member needs to be defined on the
interface object.
    The following members must be defined on an interface object.
- constant members
- static members
"""
# TODO(yukishiino): Implement this function following the spec.
return False
################################################################################
# Indexed properties
# http://heycam.github.io/webidl/#idl-indexed-properties
################################################################################
def indexed_property_getter(interface):
try:
# Find indexed property getter, if present; has form:
# getter TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG1)
return next(
method
for method in interface.operations
if ('getter' in method.specials and
len(method.arguments) == 1 and
str(method.arguments[0].idl_type) == 'unsigned long'))
except StopIteration:
return None
def indexed_property_setter(interface):
try:
# Find indexed property setter, if present; has form:
# setter RETURN_TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG1, ARG_TYPE ARG2)
return next(
method
for method in interface.operations
if ('setter' in method.specials and
len(method.arguments) == 2 and
str(method.arguments[0].idl_type) == 'unsigned long'))
except StopIteration:
return None
def indexed_property_deleter(interface):
try:
# Find indexed property deleter, if present; has form:
# deleter TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG)
return next(
method
for method in interface.operations
if ('deleter' in method.specials and
len(method.arguments) == 1 and
str(method.arguments[0].idl_type) == 'unsigned long'))
except StopIteration:
return None
################################################################################
# Named properties
# http://heycam.github.io/webidl/#idl-named-properties
################################################################################
def named_property_getter(interface):
try:
# Find named property getter, if present; has form:
# getter TYPE [OPTIONAL_IDENTIFIER](DOMString ARG1)
getter = next(
method
for method in interface.operations
if ('getter' in method.specials and
len(method.arguments) == 1 and
str(method.arguments[0].idl_type) == 'DOMString'))
getter.name = getter.name or 'anonymousNamedGetter'
return getter
except StopIteration:
return None
def named_property_setter(interface):
try:
# Find named property setter, if present; has form:
# setter RETURN_TYPE [OPTIONAL_IDENTIFIER](DOMString ARG1, ARG_TYPE ARG2)
return next(
method
for method in interface.operations
if ('setter' in method.specials and
len(method.arguments) == 2 and
str(method.arguments[0].idl_type) == 'DOMString'))
except StopIteration:
return None
def named_property_deleter(interface):
try:
# Find named property deleter, if present; has form:
# deleter TYPE [OPTIONAL_IDENTIFIER](DOMString ARG)
return next(
method
for method in interface.operations
if ('deleter' in method.specials and
len(method.arguments) == 1 and
str(method.arguments[0].idl_type) == 'DOMString'))
except StopIteration:
return None
IdlInterface.indexed_property_getter = property(indexed_property_getter)
IdlInterface.indexed_property_setter = property(indexed_property_setter)
IdlInterface.indexed_property_deleter = property(indexed_property_deleter)
IdlInterface.named_property_getter = property(named_property_getter)
IdlInterface.named_property_setter = property(named_property_setter)
IdlInterface.named_property_deleter = property(named_property_deleter)
| apache-2.0 |
vovojh/gem5 | src/arch/mips/MipsISA.py | 61 | 2521 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class MipsISA(SimObject):
type = 'MipsISA'
cxx_class = 'MipsISA::ISA'
cxx_header = "arch/mips/isa.hh"
system = Param.System(Parent.any, "System this ISA object belongs to")
num_threads = Param.UInt8(1, "Maximum number this ISA can handle")
num_vpes = Param.UInt8(1, "Maximum number of vpes this ISA can handle")
| bsd-3-clause |
Bjay1435/capstone | rootfs/usr/lib/python3.4/multiprocessing/queues.py | 83 | 11393 | #
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import weakref
import errno
from queue import Empty, Full
import _multiprocessing
from . import connection
from . import context
from .util import debug, info, Finalize, register_after_fork, is_exiting
from .reduction import ForkingPickler
#
# Queue type using a pipe, buffer and thread
#
class Queue(object):
def __init__(self, maxsize=0, *, ctx):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
self._sem = ctx.BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
context.assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send_bytes = self._writer.send_bytes
self._recv_bytes = self._reader.recv_bytes
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
def get(self, block=True, timeout=None):
if block and timeout is None:
with self._rlock:
res = self._recv_bytes()
self._sem.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if block:
timeout = deadline - time.time()
if timeout < 0 or not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
res = self._recv_bytes()
self._sem.release()
finally:
self._rlock.release()
# unserialize the data after having released the lock
return ForkingPickler.loads(res)
def qsize(self):
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send_bytes,
self._wlock, self._writer.close, self._ignore_epipe),
name='QueueFeederThread'
)
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
@staticmethod
def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
# serialize the data before acquiring the lock
obj = ForkingPickler.dumps(obj)
if wacquire is None:
send_bytes(obj)
else:
wacquire()
try:
send_bytes(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
_sentinel = object()
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
class JoinableQueue(Queue):
def __init__(self, maxsize=0, *, ctx):
Queue.__init__(self, maxsize, ctx=ctx)
self._unfinished_tasks = ctx.Semaphore(0)
self._cond = ctx.Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
self._cond.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
finally:
self._cond.release()
self._notempty.release()
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
#
# Simplified Queue type -- really just a locked pipe
#
class SimpleQueue(object):
def __init__(self, *, ctx):
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._poll = self._reader.poll
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
def empty(self):
return not self._poll()
def __getstate__(self):
context.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
def get(self):
with self._rlock:
res = self._reader.recv_bytes()
# unserialize the data after having released the lock
return ForkingPickler.loads(res)
def put(self, obj):
# serialize the data before acquiring the lock
obj = ForkingPickler.dumps(obj)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj)
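    # Design note: put() pickles before taking the write lock and get()
    # unpickles after releasing the read lock, so a slow (de)serialization
    # of one large object never blocks other producers or consumers.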
| mit |
QuantumQuadrate/CsPyController | python/newportstage.py | 1 | 5006 | from __future__ import division
"""
newportstage.py
part of the CsPyController package for AQuA experiment control by Martin Lichtman
Handles sending position updates to the Newport translation stage.
This python code sends commands to the stage controller over a serial (COM)
port via the NewportStageController driver module, also in this package.
created = 2015.06.22
modified >= 2015.06.22
"""
__author__ = 'Martin Lichtman'
import logging
logger = logging.getLogger(__name__)
from atom.api import Bool, Member, Float
from instrument_property import FloatProp, StrProp
from cs_instruments import Instrument
from cs_errors import PauseError
import NewportStageController as newportcontroller
import time
class NewportStage(Instrument):
version = '2016.12.21'
setposition = Member()
allow_evaluation = Bool(True)
gui = Member()
nport = Member()
comport = Member()
velocity = Member()
    command = Member()
mypos = Float()
axis = Member()
statusmeasurement = Bool(True)
def __init__(self, name, experiment, description=''):
super(NewportStage, self).__init__(name, experiment, description)
self.setposition = FloatProp('setposition', experiment, 'Set Position (mm)','0')
self.comport = StrProp('comport',experiment,'COM port','COM6')
self.axis = StrProp('axis',experiment,'Axis','X')
self.velocity = FloatProp('velocity',experiment,'Velocity (mm/s)','10')
self.command = StrProp('command',experiment,'Command to send','')
self.properties += ['setposition', 'comport', 'velocity', 'axis',
'version','statusmeasurement']
def initialize(self):
if self.nport is not None:
#print "Deleting Newport controller"
del self.nport
if self.enable and not self.isInitialized:
self.nport = newportcontroller.Newport(self.comport.value, self.axis.value)
self.isInitialized = True
# if self.nport.test_port():
# print "Port is initialized, Axis = {}.".format(self.axis.value)
# self.isInitialized = True
# else:
# print "Wrong Port try again"
# self.isInitialized = False
def start(self):
self.isDone = True
def update(self):
if self.enable:
self.moveStage()
return
    def moveStage(self, recurse=0):
        maxrecurse = 3
        if recurse > maxrecurse:
            logger.error("Newport stage position not reached in 10 seconds")
            logger.error("Set Position: {} mm, observed position: {} mm".format(self.setposition.value, self.mypos))
            raise PauseError
        if not self.isInitialized:
            self.initialize()
        self.nport.moveAbs(self.setposition.value * 1000)
        done = self.nport.status()
        loopcounter = 0
        while done != self.axis.value + 'D':
            done = self.nport.status()
            logger.info('Status: {}\n'.format(done))
            loopcounter += 1
            # controller sometimes gets confused,
            # resulting in it returning B continuously.
            # If this happens, reset the driver and try again.
            if loopcounter > 10:
                self.isInitialized = False
                self.moveStage(recurse=recurse+1)
        logger.info('Status: {}\n'.format(done))
        self.mypos = self.whereAmI()
        loopcounter = 0
        maxloops = 3
        while loopcounter < maxloops and abs(self.mypos - self.setposition.value) > .001:
            time.sleep(.05)
            self.mypos = self.whereAmI()
            loopcounter += 1
        if loopcounter >= maxloops:
            # if it doesn't converge, try sending the command again
            logger.warning('Newport stage did not converge. Attempting command again.')
            self.moveStage(recurse=recurse+1)
def updateaxis(self):
if not self.isInitialized:
self.initialize()
else:
self.nport.setaxis(self.axis.value)
def checkCurrentPosition(self):
self.mypos = self.whereAmI()
def whereAmI(self):
return self.nport.whereAmI()/1000
def writeResults(self, hdf5):
if self.enable and self.statusmeasurement:
self.mypos = self.whereAmI()
return
def calibrate(self):
if not self.isInitialized:
self.initialize()
self.nport.calibrateStage()
def findcenter(self):
if not self.isInitialized:
self.initialize()
self.nport.findCenter()
def home(self):
if not self.isInitialized:
self.initialize()
self.nport.home()
def setvelocity(self):
if not self.isInitialized:
self.initialize()
self.nport.setVelocity(self.velocity.value*1000)
def sendSerialCommand(self):
self.nport.WriteThenPrint(self.command.value) | lgpl-3.0 |
DaviKaur/LibreHatti | src/librehatti/settings.py | 3 | 3241 | from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
from librehatti.config import _SENDER_EMAIL
from librehatti.config import _PASSWORD
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'db_name',
'USER': 'db_user',
'PASSWORD': 'db_password',
'HOST': 'localhost',
'PORT': '',
}
}
ALLOWED_HOSTS = []
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = _SENDER_EMAIL
EMAIL_HOST_PASSWORD = _PASSWORD
TIME_ZONE = 'Asia/Kolkata'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCAL_URL = ''
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
'/path/to/LibreHatti/static',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
SECRET_KEY = 'v5j3-ny)7zlk3wmqyg298#re3#8-v_v6+@9635h0-x9zak+8t*'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
ROOT_URLCONF = 'librehatti.urls'
WSGI_APPLICATION = 'librehatti.wsgi.application'
TEMPLATE_DIRS = (
'templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'suit',
'mptt',
'django.contrib.admin',
'librehatti.catalog',
'useraccounts',
'tinymce',
'librehatti.prints',
'librehatti.suspense',
'librehatti.bills',
'librehatti.reports',
'librehatti.voucher',
'librehatti.programmeletter',
)
SUIT_CONFIG = {
'ADMIN_NAME': 'LibreHatti',
'MENU_ICONS': {
'sites': 'icon-leaf',
'auth': 'icon-lock',
'bills': 'icon-file',
},
}
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = 'admin:catalog'
LOGIN_URL = 'admin:login'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
}
},
}
AJAX_LOOKUP_CHANNELS = {
'buyer': ('librehatti.catalog.lookups', 'BuyerLookup'),
'staff': ('librehatti.programmeletter.stafflookups', 'StaffLookup'),
}
| gpl-2.0 |
JGarcia-Panach/odoo | addons/base_gengo/ir_translation.py | 343 | 4344 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
LANG_CODE_MAPPING = {
'ar_SY': ('ar', 'Arabic'),
'id_ID': ('id', 'Indonesian'),
'nl_NL': ('nl', 'Dutch'),
'fr_CA': ('fr-ca', 'French (Canada)'),
'pl_PL': ('pl', 'Polish'),
'zh_TW': ('zh-tw', 'Chinese (Traditional)'),
'sv_SE': ('sv', 'Swedish'),
'ko_KR': ('ko', 'Korean'),
'pt_PT': ('pt', 'Portuguese (Europe)'),
'en_US': ('en', 'English'),
'ja_JP': ('ja', 'Japanese'),
'es_ES': ('es', 'Spanish (Spain)'),
'zh_CN': ('zh', 'Chinese (Simplified)'),
'de_DE': ('de', 'German'),
'fr_FR': ('fr', 'French'),
'fr_BE': ('fr', 'French'),
'ru_RU': ('ru', 'Russian'),
'it_IT': ('it', 'Italian'),
'pt_BR': ('pt-br', 'Portuguese (Brazil)'),
'th_TH': ('th', 'Thai'),
'nb_NO': ('no', 'Norwegian'),
'ro_RO': ('ro', 'Romanian'),
'tr_TR': ('tr', 'Turkish'),
'bg_BG': ('bg', 'Bulgarian'),
'da_DK': ('da', 'Danish'),
'en_GB': ('en-gb', 'English (British)'),
'el_GR': ('el', 'Greek'),
'vi_VN': ('vi', 'Vietnamese'),
'he_IL': ('he', 'Hebrew'),
'hu_HU': ('hu', 'Hungarian'),
'fi_FI': ('fi', 'Finnish')
}
class ir_translation(osv.Model):
_name = "ir.translation"
_inherit = "ir.translation"
_columns = {
'gengo_comment': fields.text("Comments & Activity Linked to Gengo"),
'order_id': fields.char('Gengo Order ID', size=32),
"gengo_translation": fields.selection([('machine', 'Translation By Machine'),
('standard', 'Standard'),
('pro', 'Pro'),
('ultra', 'Ultra')], "Gengo Translation Service Level", help='You can select here the service level you want for an automatic translation using Gengo.'),
}
def _get_all_supported_languages(self, cr, uid, context=None):
flag, gengo = self.pool.get('base.gengo.translations').gengo_authentication(cr, uid, context=context)
if not flag:
raise osv.except_osv(_('Gengo Authentication Error'), gengo)
supported_langs = {}
lang_pair = gengo.getServiceLanguagePairs(lc_src='en')
if lang_pair['opstat'] == 'ok':
for g_lang in lang_pair['response']:
if g_lang['lc_tgt'] not in supported_langs:
supported_langs[g_lang['lc_tgt']] = []
supported_langs[g_lang['lc_tgt']] += [g_lang['tier']]
return supported_langs
def _get_gengo_corresponding_language(cr, lang):
return lang in LANG_CODE_MAPPING and LANG_CODE_MAPPING[lang][0] or lang
def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
query, params = super(ir_translation, self)._get_source_query(cr, uid, name, types, lang, source, res_id)
query += """
ORDER BY
CASE
WHEN gengo_translation=%s then 10
WHEN gengo_translation=%s then 20
WHEN gengo_translation=%s then 30
WHEN gengo_translation=%s then 40
ELSE 0
END DESC
"""
params += ('machine', 'standard', 'ultra', 'pro',)
return (query, params)
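    # Illustration (hypothetical data): with the CASE ranking above, a term
    # translated at both 'machine' (score 10) and 'pro' (score 40) quality
    # returns the 'pro' row first, because the score is ordered DESC.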
| agpl-3.0 |
danilito19/django | django/views/generic/base.py | 281 | 7690 | from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch, reverse
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.decorators import classonlymethod
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
view.view_class = cls
view.view_initkwargs = initkwargs
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
template_engine = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the url conf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = False
url = None
pattern_name = None
query_string = False
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', request.path,
extra={
'status_code': 410,
'request': request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
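# A hypothetical URLconf sketch (not part of this module) showing how these
# class-based views are typically wired up:
#
#     from django.conf.urls import url
#
#     urlpatterns = [
#         url(r'^about/$', TemplateView.as_view(template_name='about.html')),
#         url(r'^old/$', RedirectView.as_view(pattern_name='new-page',
#                                             permanent=True)),
#     ]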
| bsd-3-clause |
indx/indx-core | apps/service_instagram/bin/instagram/models.py | 5 | 4938 | from helper import timestamp_to_datetime
class ApiModel(object):
@classmethod
def object_from_dictionary(cls, entry):
# make dict keys all strings
entry_str_dict = dict([(str(key), value) for key, value in entry.items()])
return cls(**entry_str_dict)
def __repr__(self):
return unicode(self).encode('utf8')
class Image(ApiModel):
def __init__(self, url, width, height):
self.url = url
self.height = height
self.width = width
def __unicode__(self):
return "Image: %s" % self.url
class Media(ApiModel):
def __init__(self, id=None, **kwargs):
self.id = id
for key, value in kwargs.iteritems():
setattr(self, key, value)
def get_standard_resolution_url(self):
return self.images['standard_resolution'].url
def __unicode__(self):
return "Media: %s" % self.id
@classmethod
def object_from_dictionary(cls, entry):
new_media = Media(id=entry['id'])
new_media.user = User.object_from_dictionary(entry['user'])
new_media.images = {}
for version, version_info in entry['images'].iteritems():
new_media.images[version] = Image.object_from_dictionary(version_info)
if 'user_has_liked' in entry:
new_media.user_has_liked = entry['user_has_liked']
new_media.like_count = entry['likes']['count']
new_media.likes = []
if 'data' in entry['likes']:
for like in entry['likes']['data']:
new_media.likes.append(User.object_from_dictionary(like))
new_media.comment_count = entry['comments']['count']
new_media.comments = []
for comment in entry['comments']['data']:
new_media.comments.append(Comment.object_from_dictionary(comment))
new_media.created_time = timestamp_to_datetime(entry['created_time'])
if entry['location'] and 'id' in entry:
new_media.location = Location.object_from_dictionary(entry['location'])
new_media.caption = None
if entry['caption']:
new_media.caption = Comment.object_from_dictionary(entry['caption'])
if entry['tags']:
new_media.tags = []
for tag in entry['tags']:
new_media.tags.append(Tag.object_from_dictionary({'name': tag}))
new_media.link = entry['link']
new_media.filter = entry.get('filter')
return new_media
class Tag(ApiModel):
def __init__(self, name, **kwargs):
self.name = name
for key, value in kwargs.iteritems():
setattr(self, key, value)
def __unicode__(self):
return "Tag: %s" % self.name
class Comment(ApiModel):
def __init__(self, *args, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
@classmethod
def object_from_dictionary(cls, entry):
user = User.object_from_dictionary(entry['from'])
text = entry['text']
created_at = timestamp_to_datetime(entry['created_time'])
id = entry['id']
return Comment(id=id, user=user, text=text, created_at=created_at)
def __unicode__(self):
return "Comment: %s said \"%s\"" % (self.user.username, self.text)
class Point(ApiModel):
def __init__(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
def __unicode__(self):
return "Point: (%s, %s)" % (self.latitude, self.longitude)
class Location(ApiModel):
def __init__(self, id, *args, **kwargs):
self.id = id
for key, value in kwargs.iteritems():
setattr(self, key, value)
@classmethod
def object_from_dictionary(cls, entry):
point = None
if 'latitude' in entry:
point = Point(entry.get('latitude'),
entry.get('longitude'))
location = Location(entry.get('id', 0),
point=point,
name=entry.get('name', ''))
return location
def __unicode__(self):
return "Location: %s (%s)" % (self.id, self.point)
class User(ApiModel):
def __init__(self, id, *args, **kwargs):
self.id = id
for key, value in kwargs.iteritems():
setattr(self, key, value)
def __unicode__(self):
return "User: %s" % self.username
class Relationship(ApiModel):
def __init__(self, incoming_status="none", outgoing_status="none", target_user_is_private=False):
self.incoming_status = incoming_status
self.outgoing_status = outgoing_status
self.target_user_is_private = target_user_is_private
def __unicode__(self):
follows = False if self.outgoing_status == 'none' else True
followed = False if self.incoming_status == 'none' else True
return "Relationship: (Follows: %s, Followed by: %s)" % (follows, followed)
| agpl-3.0 |
leiferikb/bitpop | src/tools/perf/measurements/loading_profile.py | 10 | 1754 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
from metrics import loading
from telemetry.core.platform.profiler import perf_profiler
from telemetry.page import page_measurement
class LoadingProfile(page_measurement.PageMeasurement):
options = {'page_repeat': 2}
def __init__(self):
super(LoadingProfile, self).__init__(discard_first_result=True)
@property
def results_are_the_same_on_every_page(self):
return False
def CustomizeBrowserOptions(self, options):
if not perf_profiler.PerfProfiler.is_supported(browser_type='any'):
raise Exception('This measurement is not supported on this platform')
perf_profiler.PerfProfiler.CustomizeBrowserOptions(
browser_type='any', options=options)
def WillNavigateToPage(self, page, tab):
tab.browser.StartProfiling(perf_profiler.PerfProfiler.name(),
os.path.join(tempfile.mkdtemp(),
page.file_safe_name))
def MeasurePage(self, page, tab, results):
# In current telemetry tests, all tests wait for DocumentComplete state,
# but we need to wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
profile_files = tab.browser.StopProfiling()
loading.LoadingMetric().AddResults(tab, results)
profile_file = None
for profile_file in profile_files:
if 'renderer' in profile_file:
break
for function, period in perf_profiler.PerfProfiler.GetTopSamples(
profile_file, 10).iteritems():
results.Add(function.replace('.', '_'), 'period', period)
| gpl-3.0 |
seeminglee/pyglet64 | tests/window/EVENT_KEY.py | 33 | 1087 | #!/usr/bin/env python
'''Test that key press and release events work correctly.
Expected behaviour:
One window will be opened. Type into this window and check the console
output for key press and release events. Check that the correct
key symbol and modifiers are reported.
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import window
from pyglet.window import key
class EVENT_KEYPRESS(unittest.TestCase):
def on_key_press(self, symbol, modifiers):
print 'Pressed %s with modifiers %s' % \
(key.symbol_string(symbol), key.modifiers_string(modifiers))
def on_key_release(self, symbol, modifiers):
print 'Released %s with modifiers %s' % \
(key.symbol_string(symbol), key.modifiers_string(modifiers))
def test_keypress(self):
w = window.Window(200, 200)
w.push_handlers(self)
while not w.has_exit:
w.dispatch_events()
w.close()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
shaftoe/home-assistant | homeassistant/components/notify/lannouncer.py | 26 | 2596 | """
Lannouncer platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.lannouncer/
"""
import logging
from urllib.parse import urlencode
import socket
import voluptuous as vol
from homeassistant.components.notify import (
PLATFORM_SCHEMA, ATTR_DATA, BaseNotificationService)
from homeassistant.const import (CONF_HOST, CONF_PORT)
import homeassistant.helpers.config_validation as cv
ATTR_METHOD = 'method'
ATTR_METHOD_DEFAULT = 'speak'
ATTR_METHOD_ALLOWED = ['speak', 'alarm']
DEFAULT_PORT = 1035
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the Lannouncer notification service."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
return LannouncerNotificationService(hass, host, port)
class LannouncerNotificationService(BaseNotificationService):
"""Implementation of a notification service for Lannouncer."""
def __init__(self, hass, host, port):
"""Initialize the service."""
self._hass = hass
self._host = host
self._port = port
def send_message(self, message="", **kwargs):
"""Send a message to Lannouncer."""
data = kwargs.get(ATTR_DATA)
if data is not None and ATTR_METHOD in data:
method = data.get(ATTR_METHOD)
else:
method = ATTR_METHOD_DEFAULT
if method not in ATTR_METHOD_ALLOWED:
_LOGGER.error("Unknown method %s", method)
return
cmd = urlencode({method: message})
try:
# Open socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
sock.connect((self._host, self._port))
# Send message
_LOGGER.debug("Sending message: %s", cmd)
sock.sendall(cmd.encode())
sock.sendall("&@DONE@\n".encode())
# Check response
buffer = sock.recv(1024)
if buffer != b'LANnouncer: OK':
_LOGGER.error("Error sending data to Lannnouncer: %s",
buffer.decode())
# Close socket
sock.close()
except socket.gaierror:
_LOGGER.error("Unable to connect to host %s", self._host)
except socket.error:
_LOGGER.exception("Failed to send data to Lannnouncer")
| apache-2.0 |
jollyroger/debian-buildbot | buildbot/db/migrate/versions/012_add_users_table.py | 3 | 2702 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
def upgrade(migrate_engine):
metadata = sa.MetaData()
metadata.bind = migrate_engine
# what defines a user
users = sa.Table("users", metadata,
sa.Column("uid", sa.Integer, primary_key=True),
sa.Column("identifier", sa.String(256), nullable=False),
)
users.create()
idx = sa.Index('users_identifier', users.c.identifier)
idx.create()
# ways buildbot knows about users
users_info = sa.Table("users_info", metadata,
sa.Column("uid", sa.Integer, sa.ForeignKey('users.uid'),
nullable=False),
sa.Column("attr_type", sa.String(128), nullable=False),
sa.Column("attr_data", sa.String(128), nullable=False)
)
users_info.create()
idx = sa.Index('users_info_uid', users_info.c.uid)
idx.create()
idx = sa.Index('users_info_uid_attr_type', users_info.c.uid,
users_info.c.attr_type, unique=True)
idx.create()
idx = sa.Index('users_info_attrs', users_info.c.attr_type,
users_info.c.attr_data, unique=True)
idx.create()
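    # For illustration only (hypothetical rows): a single user may carry
    # several lookup attributes, e.g.
    #   users:      (uid=1, identifier='jdoe')
    #   users_info: (uid=1, attr_type='git', attr_data='John Doe <jdoe@example.com>')
    #               (uid=1, attr_type='irc', attr_data='jdoe')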
# correlates change authors and user uids
sa.Table('changes', metadata, autoload=True)
change_users = sa.Table("change_users", metadata,
sa.Column("changeid", sa.Integer, sa.ForeignKey('changes.changeid'),
nullable=False),
sa.Column("uid", sa.Integer, sa.ForeignKey('users.uid'),
nullable=False)
)
change_users.create()
idx = sa.Index('change_users_changeid', change_users.c.changeid)
idx.create()
# note that existing changes are not added to the users table; this would
# be *very* time-consuming and would not be helpful to the vast majority of
# users.
| gpl-2.0 |
inovtec-solutions/OpenERP | openerp/addons/mx/TextTools/mxTextTools/test.py | 1 | 29761 | # -*- coding: latin-1 -*-
from mx.TextTools import __version__
from mx.TextTools.Examples.HTML import *
from mx.TextTools.Constants.TagTables import *
import pprint, time, pickle
print 'Testing mxTextTools version', __version__
print
# Test for Unicode
try:
unicode
except NameError:
HAVE_UNICODE = 0
else:
HAVE_UNICODE = 1
ua = unicode('a')
ub = unicode('b')
uc = unicode('c')
ud = unicode('d')
ue = unicode('e')
uabc = unicode('abc')
uHello = unicode('Hello')
uempty = unicode('')
# Find a HTML file usable for the test
if len(sys.argv) > 1:
filenames = sys.argv[1:]
else:
filenames = ['/usr/share/doc/packages/mysql/html/manual.html',
'../Doc/mxTextTools.html']
text = ''
for filename in filenames:
try:
text = open(filename).read()
except IOError:
pass
else:
print 'HTML file used for testing the Tagging Engine:'
print ' ', filename
print
break
if not text:
text = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<HTML>
<HEAD>
<TITLE>mx Extension Series - License Information</TITLE>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
</HEAD>
<BODY TEXT="#000000" BGCOLOR="#FFFFFF" LINK="#0000EE" VLINK="#551A8B" ALINK="#FF0000">
...
<CENTER><FONT SIZE=-1>© 2000, Copyright by eGenix.com
Software GmbH, Langengeld, Germany; All Rights Reserved. mailto:
<A HREF="mailto:info@egenix.com">info@egenix.com</A>
</FONT></CENTER>
</BODY>
</HTML>
"""#"
# Test suite
while 1:
if 1:
print 'Tagging Engine:'
print ' parsing HTML ...',
utext = upper(text)
t = time.clock()
result, taglist, nextindex = tag(utext, htmltable)
assert result == 1
print ' done. (%5.2f sec.; len(taglist)=%i)' % \
(time.clock() - t, len(taglist))
if HAVE_UNICODE:
print ' parsing Unicode HTML ...',
try:
uutext = unicode(utext, 'latin-1')
except UnicodeError:
print ' ... HTML data must be Latin-1; skipping test.'
else:
t = time.clock()
result, utaglist, nextindex = tag(uutext, htmltable)
assert result == 1
print ' done. (%5.2f sec.; len(utaglist)=%i)' % \
(time.clock() - t, len(utaglist))
assert taglist == utaglist
utaglist = None
uutext = None
utext = None
taglist = None
print ' testing some tag table semantics...'
table = ((None,Word,'Word'),)
assert tag('Word',table)[0] == 1
assert tag('word',table)[0] == 0
assert tag('xyz',table)[0] == 0
table = ((None,Word,'Word',MatchFail),)
assert tag('Word',table)[0] == 1
assert tag('word',table)[0] == 0
assert tag('xyz',table)[0] == 0
table = ((None,Word,'Word',MatchOk),)
assert tag('Word',table)[0] == 1
assert tag('word',table)[0] == 1
assert tag('xyz',table)[0] == 1
table = ((None,Word,'Word',MatchOk,MatchFail),)
assert tag('Word',table)[0] == 0
assert tag('word',table)[0] == 1
assert tag('xyz',table)[0] == 1
print ' done.'
#continue
print 'splitat()'
assert splitat('Hello','l') == ('He', 'lo')
assert splitat('Hello','l',2) == ('Hel', 'o')
assert splitat('Hello','l',-1) == ('Hel', 'o')
assert splitat('Hello','l',-2) == ('He', 'lo')
if HAVE_UNICODE:
assert splitat(uHello,'l') == (unicode('He'), unicode('lo'))
assert splitat(uHello,'l',2) == (unicode('Hel'), unicode('o'))
assert splitat(uHello,unicode('l'),-1) == (unicode('Hel'), unicode('o'))
assert splitat(uHello,unicode('l'),-2) == (unicode('He'), unicode('lo'))
print 'suffix()'
assert suffix('abc.html/',('.htm','abc','.html','/'),0,3) == 'abc'
assert suffix('abc.html/',('.htm','abc','.html','/'),0,4) == None
assert suffix('abc.html/',('.htm','abc','.html','/'),0,8) == '.html'
if HAVE_UNICODE:
assert suffix(unicode('abc.html/'),('.htm','abc','.html','/'),0,3) == unicode('abc')
assert suffix(unicode('abc.html/'),(unicode('.htm'),unicode('abc'),'.html','/'),0,3) == unicode('abc')
assert suffix(unicode('abc.html/'),('.htm','abc',unicode('.html'),'/'),0,3) == unicode('abc')
try:
suffix('abc.html/',(unicode('.htm'),'abc','.html','/'))
except TypeError:
pass
else:
raise AssertionError, \
'suffix(string,...) should not accept unicode suffixes'
assert suffix(unicode('abc.html/'),('.htm','abc','.html','/'),0,4) == None
print 'prefix()'
assert prefix('abc.html/',('.htm','abc','.html','/'),0,3) == 'abc'
assert prefix('abc.html/',('.htm','abc','.html','/'),1,4) == None
assert prefix('abc.html/',('.htm','abc','.html','/'),3,9) == '.htm'
if HAVE_UNICODE:
assert prefix(unicode('abc.html/'),('.htm','abc','.html','/'),0,3) == unicode('abc')
assert prefix(unicode('abc.html/'),(unicode('.htm'),unicode('abc'),'.html','/'),0,3) == unicode('abc')
assert prefix(unicode('abc.html/'),('.htm','abc',unicode('.html'),'/'),0,3) == unicode('abc')
try:
prefix('abc.html/',(unicode('.htm'),'abc','.html','/'))
except TypeError:
pass
else:
raise AssertionError, \
'prefix(string,...) should not accept unicode prefixes'
assert prefix(unicode('abc.html/'),('.htm','abc','.html','/'),0,4) == unicode('abc')
print 'join()'
assert join(('a','b','c')) == 'abc'
assert join(['a','b','c']) == 'abc'
assert join(('a','b','c'),' ') == 'a b c'
assert join(['a','b','c'],' ') == 'a b c'
assert join((('abc',0,1),('abc',1,2),('abc',2,3))) == 'abc'
assert join((('abc',0,1),'b',('abc',2,3))) == 'abc'
assert join((('abc',0,3),)) == 'abc'
if HAVE_UNICODE:
assert join((ua,ub,uc)) == uabc
assert join([ua,ub,uc]) == uabc
assert join((ua,ub,uc),' ') == unicode('a b c')
assert join([ua,ub,uc],' ') == unicode('a b c')
assert join(((uabc,0,1),(uabc,1,2),(uabc,2,3))) == uabc
assert join(((uabc,0,1),ub,(uabc,2,3))) == uabc
assert join(((uabc,0,3),)) == uabc
print 'upper()'
assert upper('HeLLo') == 'HELLO'
assert upper('hello') == 'HELLO'
assert upper('HELLO') == 'HELLO'
assert upper('HELLO ') == 'HELLO '
assert upper('HELLO 123') == 'HELLO 123'
if HAVE_UNICODE:
assert upper(unicode('HeLLo')) == unicode('HELLO')
assert upper(unicode('hello')) == unicode('HELLO')
assert upper(unicode('HELLO')) == unicode('HELLO')
assert upper(unicode('HELLO ')) == unicode('HELLO ')
assert upper(unicode('HELLO 123')) == unicode('HELLO 123')
print 'lower()'
assert lower('HeLLo') == 'hello'
assert lower('hello') == 'hello'
assert lower('HELLO') == 'hello'
assert lower('HELLO ') == 'hello '
assert lower('HELLO 123') == 'hello 123'
if HAVE_UNICODE:
assert lower(unicode('HeLLo')) == unicode('hello')
assert lower(unicode('hello')) == unicode('hello')
assert lower(unicode('HELLO')) == unicode('hello')
assert lower(unicode('HELLO ')) == unicode('hello ')
assert lower(unicode('HELLO 123')) == unicode('hello 123')
print 'isascii()'
assert isascii('abc') == 1
assert isascii('abcäöü') == 0
assert isascii('abcäöüdef') == 0
assert isascii('.,- 1234') == 1
if HAVE_UNICODE:
assert isascii(uabc) == 1
assert isascii(unicode('abcäöü', 'latin-1')) == 0
assert isascii(unicode('abcäöüdef', 'latin-1')) == 0
assert isascii(unicode('.,- 1234')) == 1
print 'setstrip()'
assert setstrip('Hello', set('')) == 'Hello'
assert setstrip('Hello', set('o')) == 'Hell'
assert setstrip(' Hello ', set(' o')) == 'Hell'
assert setstrip(' Hello ', set(' o'), 0, len(' Hello '), -1) == 'Hello '
assert setstrip(' Hello ', set(' o'), 0, len(' Hello '), 1) == ' Hell'
assert setstrip(' ', set(' ')) == ''
print 'setsplit()'
assert setsplit('Hello', set('l')) == ['He', 'o']
assert setsplit('Hello', set('lo')) == ['He',]
assert setsplit('Hello', set('abc')) == ['Hello',]
print 'setsplitx()'
assert setsplitx('Hello', set('l')) == ['He', 'll', 'o']
assert setsplitx('Hello', set('lo')) == ['He', 'llo']
assert setsplitx('Hello', set('abc')) == ['Hello',]
print 'joinlist()'
assert joinlist('Hello', [('A',1,2), ('B',3,4)]) == \
[('Hello', 0, 1), 'A', ('Hello', 2, 3), 'B', ('Hello', 4, 5)]
assert join(joinlist('Hello', [('A',1,2), ('B',3,4)])) == \
'HAlBo'
if HAVE_UNICODE:
assert joinlist(uHello, [('A',1,2), ('B',3,4)]) == \
[(uHello, 0, 1), 'A', (uHello, 2, 3), 'B', (uHello, 4, 5)]
assert join(joinlist(uHello, [('A',1,2), ('B',3,4)])) == \
unicode('HAlBo')
assert join(joinlist('Hello', [(ua,1,2), (ub,3,4)])) == \
unicode('Halbo')
print 'charsplit()'
assert charsplit('Hello', 'l') == ['He', '', 'o']
assert charsplit('Hello', 'e') == ['H', 'llo']
assert charsplit('HelloHello', 'e') == ['H', 'lloH', 'llo']
if HAVE_UNICODE:
assert charsplit(uHello, unicode('l')) == [unicode('He'), unicode(''), unicode('o')]
assert charsplit(uHello, unicode('e')) == [unicode('H'), unicode('llo')]
assert charsplit(uHello*2, unicode('e')) == [unicode('H'), unicode('lloH'), unicode('llo')]
print 'CharSet().contains()'
tests = [
("a-z",
('a', 1), ('b', 1), ('c', 1), ('z', 1),
('A', 0), ('B', 0), ('C', 0), ('Z', 0),
),
("a\-z",
('a', 1), ('b', 0), ('c', 0), ('z', 1), ('-', 1),
),
]
if HAVE_UNICODE:
tests[len(tests):] = [
("a-z",
('a', 1), ('b', 1), ('c', 1), ('z', 1),
('A', 0), ('B', 0), ('C', 0), ('Z', 0),
(unicode('a'), 1), (unicode('b'), 1), (unicode('c'), 1), (unicode('z'), 1),
(unicode('A'), 0), (unicode('B'), 0), (unicode('C'), 0), (unicode('Z'), 0),
),
("abc",
('a', 1), ('b', 1), ('c', 1), ('z', 0),
('A', 0), ('B', 0), ('C', 0), ('Z', 0),
(unicode('a'), 1), (unicode('b'), 1), (unicode('c'), 1), (unicode('z'), 0),
(unicode('A'), 0), (unicode('B'), 0), (unicode('C'), 0), (unicode('Z'), 0),
),
(unicode("abc"),
('a', 1), ('b', 1), ('c', 1), ('z', 0),
('A', 0), ('B', 0), ('C', 0), ('Z', 0),
(unicode('a'), 1), (unicode('b'), 1), (unicode('c'), 1), (unicode('z'), 0),
(unicode('A'), 0), (unicode('B'), 0), (unicode('C'), 0), (unicode('Z'), 0),
),
(unicode('a-z\uFFFF', 'unicode-escape'),
('a', 1), ('b', 1), ('c', 1), ('z', 1),
('A', 0), ('B', 0), ('C', 0), ('Z', 0),
(unicode('a'), 1), (unicode('b'), 1), (unicode('c'), 1), (unicode('z'), 1),
(unicode('A'), 0), (unicode('B'), 0), (unicode('C'), 0), (unicode('Z'), 0),
(unichr(55555), 0), (unichr(1234), 0), (unichr(1010), 0),
(unichr(0xFFFF), 1),
),
(unicode("a\-z"),
('a', 1), ('b', 0), ('c', 0), ('z', 1), ('-', 1),
),
]
for test in tests:
cs = CharSet(test[0])
for ch, rc in test[1:]:
assert cs.contains(ch) == rc, \
'CharSet(%s).contains(%s) ... expected: %s' % \
(repr(cs.definition), repr(ch), rc)
print 'CharSet().search()'
tests = [
("a-z",
('', None), ('abc', 0), ('ABCd', 3),
),
("a\-z",
('', None), ('bcd', None), ('ABCd', None), ('zzz', 0),
),
("abc",
('', None), ('bcd', 0), ('ABCd', None), ('zzz', None), ('dddbbb', 3),
),
]
if HAVE_UNICODE:
tests[len(tests):] = [
("a-z",
('', None), ('abc', 0), ('ABCd', 3),
(unicode(''), None), (unicode('abc'), 0), (unicode('ABCd'), 3),
),
("a\-z",
('', None), ('bcd', None), ('ABCd', None), ('zzz', 0),
(unicode(''), None), (unicode('bcd'), None), (unicode('ABCd'), None), (unicode('zzz'), 0),
),
("abc",
('', None), ('bcd', 0), ('ABCd', None), ('zzz', None), ('dddbbb', 3),
(unicode(''), None), (unicode('bcd'), 0), (unicode('ABCd'), None), (unicode('zzz'), None), (unicode('dddbbb'), 3),
),
(unicode('a-z\uFFFF', 'unicode-escape'),
('', None), ('abc', 0), ('ABCd', 3),
(unicode(''), None), (unicode('abc'), 0), (unicode('ABCd'), 3),
(unichr(0xFFFF), 0),
),
(unicode('a\-z'),
('', None), ('bcd', None), ('ABCd', None), ('zzz', 0),
(unicode(''), None), (unicode('bcd'), None), (unicode('ABCd'), None), (unicode('zzz'), 0),
),
(unicode('abc'),
('', None), ('bcd', 0), ('ABCd', None), ('zzz', None), ('dddbbb', 3),
(unicode(''), None), (unicode('bcd'), 0), (unicode('ABCd'), None), (unicode('zzz'), None), (unicode('dddbbb'), 3),
),
]
for test in tests:
cs = CharSet(test[0])
for ch, rc in test[1:]:
assert cs.search(ch) == rc, \
'CharSet(%s).search(%s) ... expected: %s' % \
(repr(cs.definition), repr(ch), rc)
print 'CharSet().match()'
tests = [
("a-z",
('', 0), ('abc', 3), ('ABCd', 0),
),
("a\-z",
('', 0), ('bcd', 0), ('ABCd', 0), ('zzz', 3),
),
("abc",
('', 0), ('bcd', 2), ('ABCd', 0), ('zzz', 0), ('dddbbb', 0),
),
]
if HAVE_UNICODE:
tests[len(tests):] = [
("a-z",
('', 0), ('abc', 3), ('ABCd', 0),
(unicode(''), 0), (unicode('abc'), 3), (unicode('ABCd'), 0),
),
("a\-z",
('', 0), ('bcd', 0), ('ABCd', 0), ('zzz', 3),
(unicode(''), 0), (unicode('bcd'), 0), (unicode('ABCd'), 0), (unicode('zzz'), 3),
),
("abc",
('', 0), ('bcd', 2), ('ABCd', 0), ('zzz', 0), ('dddbbb', 0),
(unicode(''), 0), (unicode('bcd'), 2), (unicode('ABCd'), 0), (unicode('zzz'), 0), (unicode('dddbbb'), 0),
),
(unicode('a-z\uFFFF', 'unicode-escape'),
('', 0), ('abc', 3), ('ABCd', 0),
(unicode(''), 0), (unicode('abc'), 3), (unicode('ABCd'), 0),
(unichr(0xFFFF), 1),
),
(unicode('a\-z'),
('', 0), ('bcd', 0), ('ABCd', 0), ('zzz', 3),
(unicode(''), 0), (unicode('bcd'), 0), (unicode('ABCd'), 0), (unicode('zzz'), 3),
),
(unicode('abc'),
('', 0), ('bcd', 2), ('ABCd', 0), ('zzz', 0), ('dddbbb', 0),
(unicode(''), 0), (unicode('bcd'), 2), (unicode('ABCd'), 0), (unicode('zzz'), 0), (unicode('dddbbb'), 0),
),
]
for test in tests:
cs = CharSet(test[0])
for ch, rc in test[1:]:
assert cs.match(ch) == rc, \
'CharSet(%s).match(%s) ... expected: %s' % \
(repr(cs.definition), repr(ch), rc)
print 'CharSet().strip()'
assert CharSet('').strip('Hello') == 'Hello'
assert CharSet('o').strip('Hello') == 'Hell'
assert CharSet(' o').strip(' Hello ') == 'Hell'
assert CharSet(' o').strip(' Hello ', -1, 0, len(' Hello ')) == 'Hello '
assert CharSet(' o').strip(' Hello ', 1, 0, len(' Hello ')) == ' Hell'
assert CharSet(' ').strip(' ') == ''
if HAVE_UNICODE:
assert CharSet('').strip(unicode('Hello')) == unicode('Hello')
assert CharSet('o').strip(unicode('Hello')) == unicode('Hell')
assert CharSet(' o').strip(unicode(' Hello ')) == unicode('Hell')
assert CharSet(' o').strip(unicode(' Hello '), -1, 0, len(unicode(' Hello '))) == unicode('Hello ')
assert CharSet(' o').strip(unicode(' Hello '), 1, 0, len(unicode(' Hello '))) == unicode(' Hell')
assert CharSet(unicode('')).strip(unicode('Hello')) == unicode('Hello')
assert CharSet(unicode('o')).strip(unicode('Hello')) == unicode('Hell')
assert CharSet(unicode(' o')).strip(unicode(' Hello ')) == unicode('Hell')
assert CharSet(unicode(' o')).strip(unicode(' Hello '), -1, 0, len(unicode(' Hello '))) == unicode('Hello ')
assert CharSet(unicode(' o')).strip(unicode(' Hello '), 1, 0, len(unicode(' Hello '))) == unicode(' Hell')
print 'CharSet().split()'
assert CharSet('l').split('Hello') == ['He', 'o']
assert CharSet('lo').split('Hello') == ['He',]
assert CharSet('abc').split('Hello') == ['Hello',]
if HAVE_UNICODE:
assert CharSet('l').split(unicode('Hello')) == ['He', 'o']
assert CharSet('lo').split(unicode('Hello')) == ['He',]
assert CharSet('abc').split(unicode('Hello')) == ['Hello',]
assert CharSet(unicode('l')).split(unicode('Hello')) == ['He', 'o']
assert CharSet(unicode('lo')).split(unicode('Hello')) == ['He',]
assert CharSet(unicode('abc')).split(unicode('Hello')) == ['Hello',]
print 'CharSet().splitx()'
assert CharSet('l').splitx('Hello') == ['He', 'll', 'o']
assert CharSet('lo').splitx('Hello') == ['He', 'llo']
assert CharSet('abc').splitx('Hello') == ['Hello',]
assert CharSet(' ').splitx('x y ') == ['x', ' ', 'y', ' ']
assert CharSet(' ').splitx(' x y ') == ['', ' ', 'x', ' ', 'y', ' ']
if HAVE_UNICODE:
assert CharSet('l').splitx(unicode('Hello')) == ['He', 'll', 'o']
assert CharSet('lo').splitx(unicode('Hello')) == ['He', 'llo']
assert CharSet('abc').splitx(unicode('Hello')) == ['Hello',]
assert CharSet(' ').splitx(unicode('x y ')) == ['x', ' ', 'y', ' ']
assert CharSet(' ').splitx(unicode(' x y ')) == ['', ' ', 'x', ' ', 'y', ' ']
assert CharSet(unicode('l')).splitx(unicode('Hello')) == ['He', 'll', 'o']
assert CharSet(unicode('lo')).splitx(unicode('Hello')) == ['He', 'llo']
assert CharSet(unicode('abc')).splitx(unicode('Hello')) == ['Hello',]
assert CharSet(unicode(' ')).splitx(unicode('x y ')) == ['x', ' ', 'y', ' ']
assert CharSet(unicode(' ')).splitx(unicode(' x y ')) == ['', ' ', 'x', ' ', 'y', ' ']
print 'CharSet() negative logic matching'
assert CharSet('0-9').contains('a') == 0
assert CharSet('^0-9').contains('a') == 1
assert CharSet('0-9').match('abc') == 0
assert CharSet('0-9').match('123abc') == 3
assert CharSet('0-9').match('abc123') == 0
assert CharSet('0-9').search('abc') == None
assert CharSet('0-9').search('123abc') == 0
assert CharSet('0-9').search('abc123') == 3
assert CharSet('^0-9').match('abc') == 3
assert CharSet('^0-9').match('123abc') == 0
assert CharSet('^0-9').match('abc123') == 3
assert CharSet('^0-9').search('abc') == 0
assert CharSet('^0-9').search('123abc') == 3
assert CharSet('^0-9').search('abc123') == 0
if HAVE_UNICODE:
assert CharSet('0-9').contains(unicode('a')) == 0
assert CharSet('^0-9').contains(unicode('a')) == 1
assert CharSet('0-9').match(unicode('abc')) == 0
assert CharSet('0-9').match(unicode('123abc')) == 3
assert CharSet('0-9').match(unicode('abc123')) == 0
assert CharSet('0-9').search(unicode('abc')) == None
assert CharSet('0-9').search(unicode('123abc')) == 0
assert CharSet('0-9').search(unicode('abc123')) == 3
assert CharSet('^0-9').match(unicode('abc')) == 3
assert CharSet('^0-9').match(unicode('123abc')) == 0
assert CharSet('^0-9').match(unicode('abc123')) == 3
assert CharSet('^0-9').search(unicode('abc')) == 0
assert CharSet('^0-9').search(unicode('123abc')) == 3
assert CharSet('^0-9').search(unicode('abc123')) == 0
assert CharSet(unicode('0-9')).contains(unicode('a')) == 0
assert CharSet(unicode('^0-9')).contains(unicode('a')) == 1
assert CharSet(unicode('0-9')).match(unicode('abc')) == 0
assert CharSet(unicode('0-9')).match(unicode('123abc')) == 3
assert CharSet(unicode('0-9')).match(unicode('abc123')) == 0
assert CharSet(unicode('0-9')).search(unicode('abc')) == None
assert CharSet(unicode('0-9')).search(unicode('123abc')) == 0
assert CharSet(unicode('0-9')).search(unicode('abc123')) == 3
assert CharSet(unicode('^0-9')).match(unicode('abc')) == 3
assert CharSet(unicode('^0-9')).match(unicode('123abc')) == 0
assert CharSet(unicode('^0-9')).match(unicode('abc123')) == 3
assert CharSet(unicode('^0-9')).search(unicode('abc')) == 0
assert CharSet(unicode('^0-9')).search(unicode('123abc')) == 3
assert CharSet(unicode('^0-9')).search(unicode('abc123')) == 0
print 'CharSet() pickling'
cs = CharSet('abc')
pcs = pickle.dumps(cs)
cs1 = pickle.loads(pcs)
assert cs.match('abc') == cs1.match('abc')
assert cs.match('') == cs1.match('')
assert cs.match('eee') == cs1.match('eee')
assert cs.match(' abc') == cs1.match(' abc')
assert cs.match('abc...d') == cs1.match('abc...d')
assert cs.search('xxxabc') == cs1.search('xxxabc')
###
htmltag = (
(None,Is,'<'),
# is this a closing tag ?
('closetag',Is,'/',+1),
# a coment ?
('comment',Is,'!','check-xmp-tag'),
(None,Word,'--',+4),
('text',WordStart,'-->',+1),
(None,Skip,3),
(None,Jump,To,MatchOk),
# a SGML-Tag ?
('other',AllNotIn,'>',+1),
(None,Is,'>'),
(None,Jump,To,MatchOk),
# XMP-Tag ?
'check-xmp-tag',
('tagname',Word,'xmp','get-tagname'),
(None,Is,'>'),
('text',WordStart,'</xmp>'),
(None,Skip,len('</xmp>')),
(None,Jump,To,MatchOk),
# get the tag name
'get-tagname',
('tagname',AllInCharSet,tagname_charset),
# look for attributes
'get-attributes',
(None,AllInCharSet,white_charset,'incorrect-attributes'),
(None,Is,'>',+1,MatchOk),
('tagattr',Table,tagattr),
(None,Jump,To,-3),
(None,Is,'>',+1,MatchOk),
# handle incorrect attributes
'incorrect-attributes',
(error,AllNotIn,'> \n\r\t'),
(None,Jump,To,'get-attributes')
)
print 'TagTable()'
htmltable_tt = TagTable(htmltable)
htmltag_tt = TagTable(htmltag)
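    # Usage sketch (hypothetical input): the compiled table can be fed to
    # tag(), e.g.
    #
    #     result, taglist, nextindex = tag('<a href="x">', htmltag_tt)
    #
    # where result is 1 on success and taglist holds
    # (tagobject, left_index, right_index, subtags) entries.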
if HAVE_UNICODE:
print 'UnicodeTagTable()'
utt = UnicodeTagTable(htmltag)
print 'TagTable() pickling'
ptt = pickle.dumps(htmltable_tt)
tt1 = pickle.loads(ptt)
print 'TextSearch() pickling'
pts = pickle.dumps(TextSearch('test'))
ts1 = pickle.loads(pts)
if 0:
print 'HTML Table:'
pprint.pprint(htmltable)
print 'TagTable .dump() version of the HTML table:'
pprint.pprint(htmltable_tt.dump())
###
print 'TextSearch() object (Boyer-Moore)'
ts = TextSearch('test')
ts = TextSearch('test', None)
ts = TextSearch('test', 'x'*256)
ts = TextSearch('test', None, BOYERMOORE)
ts = TextSearch('test')
assert ts.search(' test') == (4, 8), ts.search(' test')
assert ts.search(' test ') == (4, 8)
assert ts.search(' abc ') == (0, 0)
assert ts.find(' test') == 4, ts.find(' test')
assert ts.find(' test ') == 4
assert ts.find(' abd ') == -1
assert ts.findall(' test test ') == [(4, 8), (10, 14)]
assert ts.findall(' abc def ') == []
if HAVE_UNICODE:
try:
ts.search(unicode(' test'))
except TypeError:
pass
else:
raise AssertionError,'Boyer-Moore does not work with Unicode'
try:
ts.find(unicode(' test'))
except TypeError:
pass
else:
raise AssertionError,'Boyer-Moore does not work with Unicode'
try:
ts.findall(unicode(' test test '))
except TypeError:
pass
else:
raise AssertionError,'Boyer-Moore does not work with Unicode'
try:
ts = TextSearch('test', None, FASTSEARCH)
except ValueError:
pass
else:
print 'TextSearch() object (FastSearch)'
assert ts.search(' test') == (4, 8)
assert ts.search(' test ') == (4, 8)
assert ts.search(' abc ') == (0, 0)
assert ts.find(' test') == 4
assert ts.find(' test ') == 4
assert ts.find(' abd ') == -1
assert ts.findall(' test test ') == [(4, 8), (10, 14)]
assert ts.findall(' abc def ') == []
print 'TextSearch() object (Trivial)'
ts = TextSearch('test', algorithm=TRIVIAL)
assert ts.search(' test') == (4, 8)
assert ts.search(' test ') == (4, 8)
assert ts.search(' abc ') == (0, 0)
assert ts.find(' test') == 4
assert ts.find(' test ') == 4
assert ts.find(' abd ') == -1
assert ts.findall(' test test ') == [(4, 8), (10, 14)]
assert ts.findall(' abc def ') == []
if HAVE_UNICODE:
print 'TextSearch() object (Trivial; Unicode)'
assert ts.search(unicode(' test')) == (4, 8)
assert ts.search(unicode(' test ')) == (4, 8)
assert ts.search(unicode(' abc ')) == (0, 0)
assert ts.find(unicode(' test')) == 4
assert ts.find(unicode(' test ')) == 4
assert ts.find(unicode(' abd ')) == -1
assert ts.findall(unicode(' test test ')) == [(4, 8), (10, 14)]
assert ts.findall(unicode(' abc def ')) == []
ts = TextSearch(unicode('test'), algorithm=TRIVIAL)
assert ts.search(' test') == (4, 8)
assert ts.search(' test ') == (4, 8)
assert ts.search(' abc ') == (0, 0)
assert ts.find(' test') == 4
assert ts.find(' test ') == 4
assert ts.find(' abd ') == -1
assert ts.findall(' test test ') == [(4, 8), (10, 14)]
assert ts.findall(' abc def ') == []
assert ts.search(unicode(' test')) == (4, 8)
assert ts.search(unicode(' test ')) == (4, 8)
assert ts.search(unicode(' abc ')) == (0, 0)
assert ts.find(unicode(' test')) == 4
assert ts.find(unicode(' test ')) == 4
assert ts.find(unicode(' abd ')) == -1
assert ts.findall(unicode(' test test ')) == [(4, 8), (10, 14)]
assert ts.findall(unicode(' abc def ')) == []
ts = TextSearch(unicode('test'))
assert ts.algorithm == TRIVIAL
print 'is_whitespace()'
assert is_whitespace(' \t\r') == 1
assert is_whitespace(' 123 ') == 0
if HAVE_UNICODE:
assert is_whitespace(unicode(' \t\r')) == 1
assert is_whitespace(unicode(' 123 ')) == 0
print 'collapse()'
assert collapse('a\nb\nc') == 'a b c'
assert collapse('a\nb\nc', '-') == 'a-b-c'
if HAVE_UNICODE:
assert collapse(unicode('a\nb\nä','latin-1')) == unicode('a b ä','latin-1')
assert collapse(unicode('a\nb\nä','latin-1'), '-') == unicode('a-b-ä','latin-1')
print 'splitwords()'
assert splitwords('a b c') == ['a', 'b', 'c']
if HAVE_UNICODE:
assert splitwords(unicode('a b ä','latin-1')) == [ua, ub, unicode('ä','latin-1')]
print 'countlines()'
assert countlines('a\nb\nc') == 3
assert countlines('a\nb\nc\n') == 3
if HAVE_UNICODE:
assert countlines(unicode('a\nb\nä','latin-1')) == 3
print 'splitlines()'
assert splitlines('a\nb\r\nc') == ['a', 'b', 'c']
assert splitlines('a\nb\r\nc\r') == ['a', 'b', 'c']
if HAVE_UNICODE:
assert splitlines(unicode('a\nb\r\nä\r','latin-1')) == [ua, ub, unicode('ä','latin-1')]
print 'replace()'
assert replace('a\nb\nc', '\n', ' ') == 'a b c'
assert replace('a\nb\nc', '\n', '-') == 'a-b-c'
if HAVE_UNICODE:
assert replace(unicode('a\nb\nä','latin-1'), '\n', ' ') == unicode('a b ä','latin-1')
assert replace(unicode('a\nb\nä','latin-1'), '\n', '-') == unicode('a-b-ä','latin-1')
print 'multireplace()'
assert multireplace('a\nb\nc', [(' ', 1, 2)]) == 'a b\nc'
assert multireplace('a\nb\nc', [('-', 1, 2), ('-', 3, 4)]) == 'a-b-c'
if HAVE_UNICODE:
assert multireplace(unicode('a\nb\nä','latin-1'), [(' ', 1, 2)]) == unicode('a b\nä','latin-1')
assert multireplace(unicode('a\nb\nä','latin-1'), [('-', 1, 2), ('-', 3, 4)]) == unicode('a-b-ä','latin-1')
print 'quoted_split()'
assert quoted_split(' a, b ,\t c,d ,e ,"ab,cd,de" ,\'a,b\'', ',') == \
['a', 'b', 'c', 'd', 'e', 'ab,cd,de', 'a,b']
# twice to test table cache
assert quoted_split(' a, b ,\t c,d ,e ,"ab,cd,de" ,\'a,b\'', ',') == \
['a', 'b', 'c', 'd', 'e', 'ab,cd,de', 'a,b']
assert quoted_split(' a b \t c d e "ab cd de" \'a b\'') == \
['a', 'b', 'c', 'd', 'e', 'ab cd de', 'a b']
assert quoted_split(',,a', ',') == ['', '', 'a']
assert quoted_split(',,a,', ',') == ['', '', 'a', '']
if HAVE_UNICODE:
assert quoted_split(unicode(' a, b ,\t c,d ,e ,"ab,cd,de" ,\'a,b\''), ',') == \
[ua, ub, uc, ud, ue, unicode('ab,cd,de'), unicode('a,b')]
assert quoted_split(unicode(',,a'), ',') == [uempty, uempty, ua]
assert quoted_split(unicode(',,a,'), ',') == [uempty, uempty, ua, uempty]
# Clear the TagTable cache
tagtable_cache.clear()
break
print
print 'Works.'
| agpl-3.0 |
RoGeorge/SCPI_solder_station | functions.py | 1 | 3306 | __author__ = 'RoGeorge'
import platform
import os
import telnetlib
import sys
# Check network response (ping)
def ping_IP(instrument, IP):
if platform.system() == "Windows":
response = os.system("ping -n 1 " + IP + " > nul")
else:
response = os.system("ping -c 1 " + IP + " > /dev/null")
if response != 0:
print
print "No response pinging " + IP
print "Check network cables and settings."
print "You should be able to ping the " + instrument + "."
# Open a telnet session for Rigol instrument
def connect_to(instrument, IP, port):
tn = telnetlib.Telnet(IP, port)
# Ask for instrument ID
tn.write("*idn?")
instrument_id = tn.read_until("\n", 1)
COMPANY = 0
MODEL = 1
id_fields = instrument_id.split(",")
# Check if the instrument is set to accept LAN commands
if id_fields[COMPANY] != "RIGOL TECHNOLOGIES":
print instrument_id
print "Non Rigol:,", instrument, "or the", instrument, "does not accept LAN commands."
print "Check the", instrument, "settings."
if instrument == "oscilloscope":
print "Utility -> IO Setting -> RemoteIO -> LAN must be ON"
if instrument == "power supply":
print "Utility -> IO Config -> LAN -> LAN Status must be Configured"
sys.exit("ERROR")
return tn, id_fields[MODEL]
def connect_verify(instrument, IP, port):
ping_IP(instrument, IP)
tn, model = connect_to(instrument, IP, port)
if instrument == "oscilloscope" and model != "DS1104Z" or \
instrument == "power supply" and model != "DP832":
print model, "is an unknown", instrument, "type."
sys.exit("ERROR")
return tn
def command(tn, SCPI):
response = ""
while response != "1\n":
tn.write("*OPC?") # operation(s) completed ?
        response = tn.read_until("\n", 1)  # wait max 1 s for an answer
tn.write(SCPI)
def init_oscilloscope(tn):
# General settings
command(tn, "RUN") # Run mode ON
# Channel 1 settings
command(tn, "CHANnel1:PROBe 10")
command(tn, "CHANnel1:BWLimit 20M") # BW Limit 20 MHz
command(tn, "CHANnel1:COUPling DC")
command(tn, "CHANnel1:SCALe 0.01") # 10 mV/div
command(tn, "CHANnel1:OFFSet 0") # If the trace is out of range, the Vavg can not be calculated
command(tn, "CHANnel1:DISPlay ON")
# Timebase settings
command(tn, "TIMebase:MAIN:SCALe 0.0001")
# Trigger settings
command(tn, "TRIGger:SWEep AUTO") # Trig Auto
command(tn, "TRIGger:EDGe:SOURce CHANnel1")
command(tn, "TRIGger:EDGe:LEVel 0")
# Acquisition settings
# Measurement settings
# command(tn, "MEASure:STATistic:RESet")
def init_power_supply(tn):
command(tn, "OUTPut:TRACk CH1, OFF") # CH2 NOT mirror
command(tn, "SOURce1:VOLTage 0") # CH2 set 0V
command(tn, "SOURce1:CURRent 3") # CH2 set 3A
command(tn, "OUTPut:OVP:VALue CH1, 25") # CH2 OVP limit 25 V
command(tn, "OUTPut:OVP CH1, ON") # CH2 OVP on
command(tn, "OUTPut:OCP:VALue CH1, 3.2") # CH2 limit 3.2A
command(tn, "OUTPut:OCP CH1, ON") # CH2 OCP on
command(tn, "OUTPut CH1, ON") # CH2 ON
| gpl-2.0 |
ct-23/home-assistant | tests/components/image_processing/test_microsoft_face_detect.py | 18 | 5421 | """The tests for the microsoft face detect platform."""
from unittest.mock import patch, PropertyMock
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.setup import setup_component
import homeassistant.components.image_processing as ip
import homeassistant.components.microsoft_face as mf
from tests.common import (
get_test_home_assistant, assert_setup_component, load_fixture, mock_coro)
class TestMicrosoftFaceDetectSetup(object):
"""Test class for image processing."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.microsoft_face.'
'MicrosoftFace.update_store', return_value=mock_coro())
def test_setup_platform(self, store_mock):
"""Setup platform with one entity."""
config = {
ip.DOMAIN: {
'platform': 'microsoft_face_detect',
'source': {
'entity_id': 'camera.demo_camera'
},
'attributes': ['age', 'gender'],
},
'camera': {
'platform': 'demo'
},
mf.DOMAIN: {
'api_key': '12345678abcdef6',
}
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
assert self.hass.states.get(
'image_processing.microsoftface_demo_camera')
@patch('homeassistant.components.microsoft_face.'
'MicrosoftFace.update_store', return_value=mock_coro())
def test_setup_platform_name(self, store_mock):
"""Setup platform with one entity and set name."""
config = {
ip.DOMAIN: {
'platform': 'microsoft_face_detect',
'source': {
'entity_id': 'camera.demo_camera',
'name': 'test local'
},
},
'camera': {
'platform': 'demo'
},
mf.DOMAIN: {
'api_key': '12345678abcdef6',
}
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
assert self.hass.states.get('image_processing.test_local')
class TestMicrosoftFaceDetect(object):
"""Test class for image processing."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
ip.DOMAIN: {
'platform': 'microsoft_face_detect',
'source': {
'entity_id': 'camera.demo_camera',
'name': 'test local'
},
'attributes': ['age', 'gender'],
},
'camera': {
'platform': 'demo'
},
mf.DOMAIN: {
'api_key': '12345678abcdef6',
}
}
self.endpoint_url = "https://westus.{0}".format(mf.FACE_API_URL)
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.image_processing.microsoft_face_detect.'
'MicrosoftFaceDetectEntity.should_poll',
new_callable=PropertyMock(return_value=False))
def test_ms_detect_process_image(self, poll_mock, aioclient_mock):
"""Setup and scan a picture and test plates from event."""
aioclient_mock.get(
self.endpoint_url.format("persongroups"),
text=load_fixture('microsoft_face_persongroups.json')
)
aioclient_mock.get(
self.endpoint_url.format("persongroups/test_group1/persons"),
text=load_fixture('microsoft_face_persons.json')
)
aioclient_mock.get(
self.endpoint_url.format("persongroups/test_group2/persons"),
text=load_fixture('microsoft_face_persons.json')
)
setup_component(self.hass, ip.DOMAIN, self.config)
state = self.hass.states.get('camera.demo_camera')
url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
face_events = []
@callback
def mock_face_event(event):
"""Mock event."""
face_events.append(event)
self.hass.bus.listen('image_processing.detect_face', mock_face_event)
aioclient_mock.get(url, content=b'image')
aioclient_mock.post(
self.endpoint_url.format("detect"),
text=load_fixture('microsoft_face_detect.json'),
params={'returnFaceAttributes': "age,gender"}
)
ip.scan(self.hass, entity_id='image_processing.test_local')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.test_local')
assert len(face_events) == 1
assert state.attributes.get('total_faces') == 1
assert state.state == '1'
assert face_events[0].data['age'] == 71.0
assert face_events[0].data['gender'] == 'male'
assert face_events[0].data['entity_id'] == \
'image_processing.test_local'
| apache-2.0 |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/learn/python/learn/tests/dataframe/csv_parser_test.py | 18 | 2279 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learn.python.learn.dataframe.transforms.csv_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class CSVParserTestCase(test.TestCase):
def testParse(self):
parser = csv_parser.CSVParser(
column_names=["col0", "col1", "col2"], default_values=["", "", 1.4])
csv_lines = ["one,two,2.5", "four,five,6.0"]
csv_input = constant_op.constant(
csv_lines, dtype=dtypes.string, shape=[len(csv_lines)])
csv_column = mocks.MockSeries("csv", csv_input)
expected_output = [
np.array([b"one", b"four"]), np.array([b"two", b"five"]),
np.array([2.5, 6.0])
]
output_columns = parser(csv_column)
self.assertEqual(3, len(output_columns))
cache = {}
output_tensors = [o.build(cache) for o in output_columns]
self.assertEqual(3, len(output_tensors))
with self.test_session() as sess:
output = sess.run(output_tensors)
for expected, actual in zip(expected_output, output):
np.testing.assert_array_equal(actual, expected)
if __name__ == "__main__":
test.main()
| apache-2.0 |
PongPi/isl-odoo | addons/auth_crypt/__openerp__.py | 310 | 2298 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://odoo.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Password Encryption',
'version': '2.0',
'author': ['OpenERP SA', 'FS3'],
'maintainer': 'OpenERP SA',
'website': 'https://www.odoo.com',
'category': 'Tools',
'description': """
Encrypted passwords
===================
Replaces the default password storage with a strong cryptographic
hash.
The key derivation function currently used is RSA Security LLC's
industry-standard ``PBKDF2``, in combination with ``SHA512``.
This includes salting and key stretching with several thousand
rounds.
All passwords are encrypted as soon as the module is installed.
This may take a few minutes if there are thousands of users.
Past versions of encrypted passwords will be automatically upgraded
to the current scheme whenever a user authenticates
(``auth_crypt`` was previously using the weaker ``md5crypt`` key
derivation function).
Note: Installing this module permanently prevents user password
recovery and cannot be undone. It is thus recommended to enable
some password reset mechanism for users, such as the one provided
by the ``auth_signup`` module (signup for new users does not
necessarily have to be enabled).
""",
'depends': ['base'],
'data': [],
'auto_install': True,
'installable': True,
}
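# Illustrative sketch only (not part of this manifest): the kind of PBKDF2
# key stretching described above, using the standard library (Python 2.7.8+).
# The salt size and round count are assumptions for the example; the module
# itself delegates the actual hashing to the passlib library.
#
#     import os, hashlib, binascii
#     salt = os.urandom(16)                  # random per-password salt
#     dk = hashlib.pbkdf2_hmac('sha512',     # PRF: HMAC-SHA512
#                              b'secret password', salt,
#                              10000)        # key-stretching rounds
#     stored = binascii.hexlify(dk)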
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wyc/django | django/contrib/gis/geos/polygon.py | 450 | 6843 | from ctypes import byref, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import GEOM_PTR, get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing
from django.utils import six
from django.utils.six.moves import range
class Polygon(GEOSGeometry):
_minlength = 1
def __init__(self, *args, **kwargs):
"""
Initializes on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> from django.contrib.gis.geos import LinearRing, Polygon
>>> shell = hole1 = hole2 = LinearRing()
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
        >>> # Example where tuple parameters are used:
        >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
        ...                ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.')
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring = args[0]
init_holes = args[1:]
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if len(init_holes[0]) == 0:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes)
super(Polygon, self).__init__(polygon, **kwargs)
def __iter__(self):
"Iterates over each ring in the polygon."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of rings in this Polygon."
return self.num_interior_rings + 1
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
for z in bbox:
if not isinstance(z, six.integer_types + (float,)):
return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' %
(x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))
# ### These routines are needed for list-like operation w/ListMixin ###
def _create_polygon(self, length, items):
# Instantiate LinearRing objects if necessary, but don't clone them yet
# _construct_ring will throw a TypeError if a parameter isn't a valid ring
# If we cloned the pointers here, we wouldn't be able to clean up
# in case of error.
rings = []
for r in items:
if isinstance(r, GEOM_PTR):
rings.append(r)
else:
rings.append(self._construct_ring(r))
shell = self._clone(rings.pop(0))
n_holes = length - 1
if n_holes:
holes = get_pointer_arr(n_holes)
for i, r in enumerate(rings):
holes[i] = self._clone(r)
holes_param = byref(holes)
else:
holes_param = None
return capi.create_polygon(shell, holes_param, c_uint(n_holes))
def _clone(self, g):
if isinstance(g, GEOM_PTR):
return capi.geom_clone(g)
else:
return capi.geom_clone(g.ptr)
def _construct_ring(self, param, msg=(
'Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings')):
"Helper routine for trying to construct a ring from the given parameter."
if isinstance(param, LinearRing):
return param
try:
ring = LinearRing(param)
return ring
except TypeError:
raise TypeError(msg)
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
def _get_single_internal(self, index):
"""
Returns the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
_get_single_internal returns a pointer from the existing geometries for use
internally by the object's methods. _get_single_external returns a clone
of the same geometry for use by external code.
"""
if index == 0:
return capi.get_extring(self.ptr)
else:
# Getting the interior ring, have to subtract 1 from the index.
return capi.get_intring(self.ptr, index - 1)
def _get_single_external(self, index):
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
# #### Polygon Properties ####
@property
def num_interior_rings(self):
"Returns the number of interior rings."
# Getting the number of rings
return capi.get_nrings(self.ptr)
def _get_ext_ring(self):
"Gets the exterior ring of the Polygon."
return self[0]
def _set_ext_ring(self, ring):
"Sets the exterior ring of the Polygon."
self[0] = ring
# Properties for the exterior ring/shell.
exterior_ring = property(_get_ext_ring, _set_ext_ring)
shell = exterior_ring
@property
def tuple(self):
"Gets the tuple for each ring in this Polygon."
return tuple(self[i].tuple for i in range(len(self)))
coords = tuple
@property
def kml(self):
"Returns the KML representation of this Polygon."
inner_kml = ''.join("<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml
for i in range(self.num_interior_rings))
return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
| bsd-3-clause |
Valloric/hyde | hyde/ext/publishers/pyfs.py | 2 | 4080 | """
Contains classes and utilities that help publishing a hyde website to
a filesystem using PyFilesystem FS objects.
This publisher provides an easy way to publish to FTP, SFTP, WebDAV or other
servers by specifying a PyFS filesystem URL. For example, the following
are valid URLs that can be used with this publisher:
ftp://my.server.com/~username/my_blog/
dav:https://username:password@my.server.com/path/to/my/site
"""
import getpass
import hashlib
from hyde.fs import File, Folder
from hyde.publisher import Publisher
from hyde.util import getLoggerWithNullHandler
logger = getLoggerWithNullHandler('hyde.ext.publishers.pyfs')
try:
from fs.osfs import OSFS
from fs.path import pathjoin
from fs.opener import fsopendir
except ImportError:
logger.error("The PyFS publisher requires PyFilesystem v0.4 or later.")
logger.error("`pip install -U fs` to get it.")
raise
class PyFS(Publisher):
def initialize(self, settings):
self.settings = settings
self.url = settings.url
self.check_mtime = getattr(settings,"check_mtime",False)
self.check_etag = getattr(settings,"check_etag",False)
if self.check_etag and not isinstance(self.check_etag,basestring):
raise ValueError("check_etag must name the etag algorithm")
self.prompt_for_credentials()
self.fs = fsopendir(self.url)
def prompt_for_credentials(self):
credentials = {}
if "%(username)s" in self.url:
print "Username: ",
credentials["username"] = raw_input().strip()
if "%(password)s" in self.url:
credentials["password"] = getpass.getpass("Password: ")
if credentials:
self.url = self.url % credentials
def publish(self):
super(PyFS, self).publish()
deploy_fs = OSFS(self.site.config.deploy_root_path.path)
for (dirnm,local_filenms) in deploy_fs.walk():
logger.info("Making directory: %s",dirnm)
self.fs.makedir(dirnm,allow_recreate=True)
remote_fileinfos = self.fs.listdirinfo(dirnm,files_only=True)
# Process each local file, to see if it needs updating.
for filenm in local_filenms:
filepath = pathjoin(dirnm,filenm)
# Try to find an existing remote file, to compare metadata.
for (nm,info) in remote_fileinfos:
if nm == filenm:
break
else:
info = {}
# Skip it if the etags match
if self.check_etag and "etag" in info:
with deploy_fs.open(filepath,"rb") as f:
local_etag = self._calculate_etag(f)
if info["etag"] == local_etag:
logger.info("Skipping file [etag]: %s",filepath)
continue
# Skip it if the mtime is more recent remotely.
if self.check_mtime and "modified_time" in info:
local_mtime = deploy_fs.getinfo(filepath)["modified_time"]
if info["modified_time"] > local_mtime:
logger.info("Skipping file [mtime]: %s",filepath)
continue
# Upload it to the remote filesystem.
logger.info("Uploading file: %s",filepath)
with deploy_fs.open(filepath,"rb") as f:
self.fs.setcontents(filepath,f)
# Process each remote file, to see if it needs deleting.
for (filenm,info) in remote_fileinfos:
filepath = pathjoin(dirnm,filenm)
if filenm not in local_filenms:
logger.info("Removing file: %s",filepath)
self.fs.remove(filepath)
def _calculate_etag(self,f):
hasher = getattr(hashlib,self.check_etag.lower())()
data = f.read(1024*64)
while data:
hasher.update(data)
data = f.read(1024*64)
return hasher.hexdigest()
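# Illustrative configuration sketch (an assumption based on the settings
# attributes read in initialize() -- url, check_mtime, check_etag; the host
# and paths are invented). In the site yaml this publisher would be wired
# up roughly like:
#
#     publisher:
#         sftp:
#             type: hyde.ext.publishers.pyfs.PyFS
#             url: sftp://username@my.server.com/home/username/my_blog/
#             check_mtime: true
#             check_etag: md5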
| mit |
cyberphox/MissionPlanner | Lib/site-packages/scipy/special/spfun_stats.py | 51 | 3507 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
# Last Change: Sat Mar 21 02:00 PM 2009 J
# Copyright (c) 2001, 2002 Enthought, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Some more special functions which may be useful for multivariate statistical
analysis."""
import numpy as np
from scipy.special import gammaln as loggam
def multigammaln(a, d):
"""returns the log of multivariate gamma, also sometimes called the
generalized gamma.
Parameters
----------
a : ndarray
the multivariate gamma is computed for each item of a
d : int
the dimension of the space of integration.
Returns
-------
res : ndarray
the values of the log multivariate gamma at the given points a.
Note
----
The formal definition of the multivariate gamma of dimension d for a real a
    is:

        \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA

    with the condition a > (d-1)/2, and A>0 being the set of all positive
    definite matrices of dimension d. Note that a is a scalar: only the
    integrand is multivariate, the argument is not (the function is defined
    over a subset of the real line).
    This can be proven to be equal to the much friendlier expression:
\Gamma_d(a) = \pi^{d(d-1)/4}\prod_{i=1}^{d}{\Gamma(a - (i-1)/2)}.
Notes
-----
Reference:
R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
probability and mathematical statistics). """
a = np.asarray(a)
if not np.isscalar(d) or (np.floor(d) != d):
raise ValueError("d should be a positive integer (dimension)")
if np.any(a <= 0.5 * (d - 1)):
raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met" \
% (a, 0.5 * (d-1)))
res = (d * (d-1) * 0.25) * np.log(np.pi)
if a.size == 1:
axis = -1
else:
axis = 0
res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis)
return res
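# Quick sanity checks (illustrative): for d = 1 the multivariate gamma
# reduces to the ordinary gamma, and for d = 2 the product formula above
# gives 0.5*log(pi) + gammaln(a) + gammaln(a - 0.5). a = 4.0 is arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy.special import gammaln
#     >>> np.allclose(multigammaln(4.0, 1), gammaln(4.0))
#     True
#     >>> np.allclose(multigammaln(4.0, 2),
#     ...             0.5 * np.log(np.pi) + gammaln(4.0) + gammaln(3.5))
#     True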
| gpl-3.0 |
goblin/p2pool | nattraverso/pynupnp/upnp.py | 283 | 18985 | """
This module is the heart of the upnp support. Device discover, ip discovery
and port mappings are implemented here.
@author: Raphael Slinckx
@author: Anthony Baxter
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
import socket, random, urlparse, logging
from twisted.internet import reactor, defer
from twisted.web import client
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.error import CannotListenError
from twisted.python import failure
from nattraverso.pynupnp.soap import SoapProxy
from nattraverso.pynupnp.upnpxml import UPnPXml
from nattraverso import ipdiscover, portmapper
class UPnPError(Exception):
"""
A generic UPnP error, with a descriptive message as content.
"""
pass
class UPnPMapper(portmapper.NATMapper):
"""
This is the UPnP port mapper implementing the
L{NATMapper<portmapper.NATMapper>} interface.
@see: L{NATMapper<portmapper.NATMapper>}
"""
def __init__(self, upnp):
"""
Creates the mapper, with the given L{UPnPDevice} instance.
@param upnp: L{UPnPDevice} instance
"""
self._mapped = {}
self._upnp = upnp
def map(self, port):
"""
See interface
"""
self._check_valid_port(port)
#Port is already mapped
if port in self._mapped:
return defer.succeed(self._mapped[port])
#Trigger a new mapping creation, first fetch local ip.
result = ipdiscover.get_local_ip()
self._mapped[port] = result
return result.addCallback(self._map_got_local_ip, port)
def info(self, port):
"""
See interface
"""
# If the mapping exists, everything's ok
if port in self._mapped:
return self._mapped[port]
else:
raise ValueError('Port %r is not currently mapped'%(port))
def unmap(self, port):
"""
See interface
"""
if port in self._mapped:
existing = self._mapped[port]
#Pending mapping, queue an unmap,return existing deferred
if type(existing) is not tuple:
existing.addCallback(lambda x: self.unmap(port))
return existing
#Remove our local mapping
del self._mapped[port]
#Ask the UPnP to remove the mapping
extaddr, extport = existing
return self._upnp.remove_port_mapping(extport, port.getHost().type)
else:
raise ValueError('Port %r is not currently mapped'%(port))
def get_port_mappings(self):
"""
See interface
"""
return self._upnp.get_port_mappings()
def _map_got_local_ip(self, ip_result, port):
"""
        We got the local ip address, retrieve the existing port mappings
in the device.
@param ip_result: result of L{ipdiscover.get_local_ip}
@param port: a L{twisted.internet.interfaces.IListeningPort} we
want to map
"""
local, ip = ip_result
return self._upnp.get_port_mappings().addCallback(
self._map_got_port_mappings, ip, port)
def _map_got_port_mappings(self, mappings, ip, port):
"""
We got all the existing mappings in the device, find an unused one
and assign it for the requested port.
@param ip: The local ip of this host "x.x.x.x"
@param port: a L{twisted.internet.interfaces.IListeningPort} we
want to map
@param mappings: result of L{UPnPDevice.get_port_mappings}
"""
#Get the requested mapping's info
ptype = port.getHost().type
intport = port.getHost().port
for extport in [random.randrange(1025, 65536) for val in range(20)]:
# Check if there is an existing mapping, if it does not exist, bingo
if not (ptype, extport) in mappings:
break
if (ptype, extport) in mappings:
existing = mappings[ptype, extport]
local_ip, local_port = existing
if local_ip == ip and local_port == intport:
# Existing binding for this host/port/proto - replace it
break
# Triggers the creation of the mapping on the device
result = self._upnp.add_port_mapping(ip, intport, extport, 'pynupnp', ptype)
# We also need the external IP, so we queue first an
# External IP Discovery, then we add the mapping.
return result.addCallback(
lambda x: self._upnp.get_external_ip()).addCallback(
self._port_mapping_added, extport, port)
def _port_mapping_added(self, extaddr, extport, port):
"""
The port mapping was added in the device, this means::
Internet NAT LAN
|
> IP:extaddr |> IP:local ip
> Port:extport |> Port:port
|
        @param extaddr: The external ip address
@param extport: The external port as number
@param port: The internal port as a
L{twisted.internet.interfaces.IListeningPort} object, that has been
mapped
"""
self._mapped[port] = (extaddr, extport)
return (extaddr, extport)
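# Illustrative sketch (not part of this module): mapping a listening port
# through the class above, inside a running Twisted reactor. The port
# number and some_factory are invented examples.
#
#     def on_device(device):
#         mapper = UPnPMapper(device)
#         port = reactor.listenTCP(9333, some_factory)  # some_factory: assumed
#         mapper.map(port).addCallback(
#             lambda (addr, extport):
#                 logging.info("mapped to %s:%d", addr, extport))
#
#     search_upnp_device().addCallback(on_device)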
class UPnPDevice:
"""
Represents an UPnP device, with the associated infos, and remote methods.
"""
def __init__(self, soap_proxy, info):
"""
Build the device, with the given SOAP proxy, and the meta-infos.
@param soap_proxy: an initialized L{SoapProxy} to the device
        @param info: a dictionary of various infos concerning the
device extracted with L{UPnPXml}
"""
self._soap_proxy = soap_proxy
self._info = info
def get_external_ip(self):
"""
Triggers an external ip discovery on the upnp device. Returns
a deferred called with the external ip of this host.
@return: A deferred called with the ip address, as "x.x.x.x"
@rtype: L{twisted.internet.defer.Deferred}
"""
result = self._soap_proxy.call('GetExternalIPAddress')
result.addCallback(self._on_external_ip)
return result
def get_port_mappings(self):
"""
        Retrieve the existing port mappings
        @see: L{portmapper.NATMapper.get_port_mappings}
        @return: A deferred called with the dictionary as defined
in the interface L{portmapper.NATMapper.get_port_mappings}
@rtype: L{twisted.internet.defer.Deferred}
"""
return self._get_port_mapping()
def add_port_mapping(self, local_ip, intport, extport, desc, proto, lease=0):
"""
Add a port mapping in the upnp device. Returns a deferred.
@param local_ip: the LAN ip of this host as "x.x.x.x"
@param intport: the internal port number
@param extport: the external port number
@param desc: the description of this mapping (string)
@param proto: "UDP" or "TCP"
        @param lease: The duration of the lease in (milli)seconds(??)
@return: A deferred called with None when the mapping is done
@rtype: L{twisted.internet.defer.Deferred}
"""
result = self._soap_proxy.call('AddPortMapping', NewRemoteHost="",
NewExternalPort=extport,
NewProtocol=proto,
NewInternalPort=intport,
NewInternalClient=local_ip,
NewEnabled=1,
NewPortMappingDescription=desc,
NewLeaseDuration=lease)
return result.addCallbacks(self._on_port_mapping_added,
self._on_no_port_mapping_added)
def remove_port_mapping(self, extport, proto):
"""
Remove an existing port mapping on the device. Returns a deferred
@param extport: the external port number associated to the mapping
to be removed
@param proto: either "UDP" or "TCP"
@return: A deferred called with None when the mapping is done
@rtype: L{twisted.internet.defer.Deferred}
"""
result = self._soap_proxy.call('DeletePortMapping', NewRemoteHost="",
NewExternalPort=extport,
NewProtocol=proto)
return result.addCallbacks(self._on_port_mapping_removed,
self._on_no_port_mapping_removed)
# Private --------
def _on_external_ip(self, res):
"""
Called when we received the external ip address from the device.
@param res: the SOAPpy structure of the result
@return: the external ip string, as "x.x.x.x"
"""
logging.debug("Got external ip struct: %r", res)
return res['NewExternalIPAddress']
def _get_port_mapping(self, mapping_id=0, mappings=None):
"""
Fetch the existing mappings starting at index
"mapping_id" from the device.
        To retrieve all the mappings call this without parameters.
        @param mapping_id: The index of the mapping to start fetching from
        @param mappings: the dictionary of already fetched mappings
        @return: A deferred called with the existing mappings when all have been
        retrieved, see L{get_port_mappings}
@rtype: L{twisted.internet.defer.Deferred}
"""
if mappings == None:
mappings = {}
result = self._soap_proxy.call('GetGenericPortMappingEntry',
NewPortMappingIndex=mapping_id)
return result.addCallbacks(
lambda x: self._on_port_mapping_received(x, mapping_id+1, mappings),
lambda x: self._on_no_port_mapping_received( x, mappings))
def _on_port_mapping_received(self, response, mapping_id, mappings):
"""
        Called when we receive a single mapping from the device.
@param response: a SOAPpy structure, representing the device's answer
@param mapping_id: The index of the next mapping in the device
@param mappings: the already fetched mappings, see L{get_port_mappings}
@return: A deferred called with the existing mappings when all have been
        retrieved, see L{get_port_mappings}
@rtype: L{twisted.internet.defer.Deferred}
"""
logging.debug("Got mapping struct: %r", response)
mappings[
response['NewProtocol'], response['NewExternalPort']
] = (response['NewInternalClient'], response['NewInternalPort'])
return self._get_port_mapping(mapping_id, mappings)
def _on_no_port_mapping_received(self, failure, mappings):
"""
        Called when we have no more port mappings to retrieve, or an
        error occurred while retrieving them.
        Either we have a "SpecifiedArrayIndexInvalid" SOAP error, and that's ok,
        it just means we have finished. If it returns some other error, then we
        fail with an UPnPError.
        @param mappings: the already retrieved mappings
@param failure: the failure
@return: The existing mappings as defined in L{get_port_mappings}
@raise UPnPError: When we got any other error
than "SpecifiedArrayIndexInvalid"
"""
logging.debug("_on_no_port_mapping_received: %s", failure)
err = failure.value
message = err.args[0]["UPnPError"]["errorDescription"]
if "SpecifiedArrayIndexInvalid" == message:
return mappings
else:
return failure
def _on_port_mapping_added(self, response):
"""
The port mapping was successfully added, return None to the deferred.
"""
return None
def _on_no_port_mapping_added(self, failure):
"""
Called when the port mapping could not be added. Immediately
raise an UPnPError, with the SOAPpy structure inside.
@raise UPnPError: When the port mapping could not be added
"""
return failure
def _on_port_mapping_removed(self, response):
"""
The port mapping was successfully removed, return None to the deferred.
"""
return None
def _on_no_port_mapping_removed(self, failure):
"""
Called when the port mapping could not be removed. Immediately
raise an UPnPError, with the SOAPpy structure inside.
@raise UPnPError: When the port mapping could not be deleted
"""
return failure
# UPNP multicast address, port and request string
_UPNP_MCAST = '239.255.255.250'
_UPNP_PORT = 1900
_UPNP_SEARCH_REQUEST = """M-SEARCH * HTTP/1.1\r
Host:%s:%s\r
ST:urn:schemas-upnp-org:device:InternetGatewayDevice:1\r
Man:"ssdp:discover"\r
MX:3\r
\r
""" % (_UPNP_MCAST, _UPNP_PORT)
class UPnPProtocol(DatagramProtocol, object):
"""
The UPnP Device discovery udp multicast twisted protocol.
"""
def __init__(self, *args, **kwargs):
"""
Init the protocol, no parameters needed.
"""
super(UPnPProtocol, self).__init__(*args, **kwargs)
#Device discovery deferred
self._discovery = None
self._discovery_timeout = None
self.mcast = None
self._done = False
# Public methods
def search_device(self):
"""
Triggers a UPnP device discovery.
The returned deferred will be called with the L{UPnPDevice} that has
been found in the LAN.
@return: A deferred called with the detected L{UPnPDevice} instance.
@rtype: L{twisted.internet.defer.Deferred}
"""
if self._discovery is not None:
raise ValueError('already used')
self._discovery = defer.Deferred()
self._discovery_timeout = reactor.callLater(6, self._on_discovery_timeout)
attempt = 0
mcast = None
while True:
try:
self.mcast = reactor.listenMulticast(1900+attempt, self)
break
except CannotListenError:
attempt = random.randint(0, 500)
# joined multicast group, starting upnp search
self.mcast.joinGroup('239.255.255.250', socket.INADDR_ANY)
self.transport.write(_UPNP_SEARCH_REQUEST, (_UPNP_MCAST, _UPNP_PORT))
self.transport.write(_UPNP_SEARCH_REQUEST, (_UPNP_MCAST, _UPNP_PORT))
self.transport.write(_UPNP_SEARCH_REQUEST, (_UPNP_MCAST, _UPNP_PORT))
return self._discovery
#Private methods
    def datagramReceived(self, dgram, address):
        """
        This is private, handle the multicast answer from the upnp device.
        """
        if self._done:
            return
        logging.debug("Got UPNP multicast search answer:\n%s", dgram)
#This is an HTTP response
response, message = dgram.split('\r\n', 1)
# Prepare status line
version, status, textstatus = response.split(None, 2)
if not version.startswith('HTTP'):
return
if status != "200":
return
# Launch the info fetching
def parse_discovery_response(message):
"""Separate headers and body from the received http answer."""
hdict = {}
body = ''
remaining = message
while remaining:
line, remaining = remaining.split('\r\n', 1)
line = line.strip()
if not line:
body = remaining
break
key, val = line.split(':', 1)
key = key.lower()
hdict.setdefault(key, []).append(val.strip())
return hdict, body
headers, body = parse_discovery_response(message)
if not 'location' in headers:
self._on_discovery_failed(
UPnPError(
"No location header in response to M-SEARCH!: %r"%headers))
return
loc = headers['location'][0]
result = client.getPage(url=loc)
result.addCallback(self._on_gateway_response, loc).addErrback(self._on_discovery_failed)
    def _on_gateway_response(self, body, loc):
        """
        Called with the UPnP device XML description fetched via HTTP.
        If the device has suitable services for ip discovery and port mappings,
        the callback returned in L{search_device} is called with
        the discovered L{UPnPDevice}.
        @raise UPnPError: When no suitable service has been
        found in the description, or another error occurs.
        @param body: The xml description of the device.
        @param loc: the url used to retrieve the xml description
        """
        if self._done:
            return
# Parse answer
upnpinfo = UPnPXml(body)
# Check if we have a base url, if not consider location as base url
urlbase = upnpinfo.urlbase
        if urlbase is None:
urlbase = loc
# Check the control url, if None, then the device cannot do what we want
controlurl = upnpinfo.controlurl
        if controlurl is None:
self._on_discovery_failed(UPnPError("upnp response showed no WANConnections"))
return
control_url2 = urlparse.urljoin(urlbase, controlurl)
soap_proxy = SoapProxy(control_url2, upnpinfo.wanservice)
self._on_discovery_succeeded(UPnPDevice(soap_proxy, upnpinfo.deviceinfos))
def _on_discovery_succeeded(self, res):
if self._done:
return
self._done = True
self.mcast.stopListening()
self._discovery_timeout.cancel()
self._discovery.callback(res)
def _on_discovery_failed(self, err):
if self._done:
return
self._done = True
self.mcast.stopListening()
self._discovery_timeout.cancel()
self._discovery.errback(err)
def _on_discovery_timeout(self):
if self._done:
return
self._done = True
self.mcast.stopListening()
self._discovery.errback(failure.Failure(defer.TimeoutError('in _on_discovery_timeout')))
def search_upnp_device():
"""
Check the network for an UPnP device. Returns a deferred
with the L{UPnPDevice} instance as result, if found.
@return: A deferred called with the L{UPnPDevice} instance
@rtype: L{twisted.internet.defer.Deferred}
"""
return defer.maybeDeferred(UPnPProtocol().search_device)
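# Minimal discovery sketch (illustrative): query the gateway for its
# external address. The callback wiring and reactor shutdown are example
# code, not part of this module.
#
#     from twisted.internet import reactor
#
#     def on_found(device):
#         d = device.get_external_ip()
#         d.addCallback(lambda ip: logging.info("external ip: %s", ip))
#         d.addBoth(lambda _: reactor.stop())
#
#     search_upnp_device().addCallbacks(on_found, lambda err: reactor.stop())
#     reactor.run()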
| gpl-3.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/bs4/builder/__init__.py | 73 | 11234 | from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
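# Illustrative lookup sketch (not part of this module): asking the registry
# for a builder class by feature. Which class is returned depends on what
# is installed, so the outcomes noted below are examples, not guarantees.
#
#     >>> from bs4.builder import builder_registry
#     >>> builder_registry.lookup('html')          # best available HTML builder
#     >>> builder_registry.lookup('fast', 'html')  # must satisfy both features
#     >>> builder_registry.lookup('no-such-feature') is None
#     True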
| mit |
tanglu-org/tgl-misago | misago/templatetags/utils.py | 3 | 1531 | from django_jinja.library import Library
from haystack.utils import Highlighter
from misago.utils import colors
from misago.utils.strings import short_string
register = Library()
@register.global_function(name='intersect')
def intersect(list_a, list_b):
for i in list_a:
if i in list_b:
return True
return False
@register.filter(name='short_string')
def make_short(string, length=16):
return short_string(string, length)
@register.filter(name='filesize')
def format_filesize(size):
try:
for u in ('B','KB','MB','GB','TB'):
if size < 1024.0:
return "%3.1f %s" % (size, u)
            size /= 1024.0
        return "%3.1f PB" % size  # fallback for sizes beyond TB
    except ValueError:
        return '0 B'
@register.filter(name='highlight')
def highlight_result(text, query, length=500):
hl = Highlighter(query, html_tag='strong', max_length=length)
hl = hl.highlight(text)
return hl
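# Illustrative template usage for the filters above (a sketch; variable
# names are invented, and django_jinja exposes these with Jinja syntax):
#
#     {{ thread.title|short_string(32) }}
#     {{ attachment.size|filesize }}
#     {{ result.text|highlight(query, 300) }}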
@register.global_function(name='color_spin')
def spin_color_filter(color, spin):
return colors.spin(color, spin)
@register.global_function(name='color_desaturate')
def desaturate_color_filter(color, steps, step, minimum=0.0):
return colors.desaturate(color, steps, step, minimum)
@register.global_function(name='color_lighten')
def lighten_color_filter(color, steps, step, maximum=100.0):
return colors.lighten(color, steps, step, maximum)
@register.global_function(name='color_darken')
def darken_color_filter(color, steps, step, minimum=0.0):
    return colors.darken(color, steps, step, minimum)
| gpl-3.0 |
nietao2/things | kafkaClient/node_modules/kafka-node/node_modules/snappy/node_modules/node-gyp/gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
        elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
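# Illustrative sketch (not part of this script): CommandRunner accepts both
# command strings (with %-substitution from its dictionary) and
# (python_function, args...) tuples. The values below are invented examples.
#
#     cr = CommandRunner({'format': 'ninja'})
#     cr.run('echo building for %(format)s')  # string command, substituted
#     cr.run((os.chdir, 'test'))              # tuple command, a Python call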
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
CuonDeveloper/cuon | cuon_client/cuon/Databases/SingleExport.py | 3 | 1725 | # -*- coding: utf-8 -*-
##Copyright (C) [2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
try:
import pygtk
pygtk.require('2.0')
import gtk
import gobject
except:
pass
class SingleExport(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
#self.sNameOfTable = "biblio"
self.xmlTableDef = 0
# self.loadTable()
# self.saveTable()
self.allTables = allTables
#self.loadTable(allTables)
#self.setStore( gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT) )
#self.listHeader['names'] = ['title', 'designation', 'ID']
#self.listHeader['size'] = [25,10,25,25,10]
#print "number of Columns "
#print len(self.table.Columns)
#
self.tree1 = None
def setExportTable(self,dname):
self.sNameOfTable = dname
self.loadTable(self.allTables)
| gpl-3.0 |
doctorOb/thoughtsbydrob | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/filters/__init__.py | 196 | 11499 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""
Lookup a filter by name. Return None if not found.
"""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""
    Return an instantiated filter. Options are passed to the filter
    initializer if given. Raise ClassNotFound if no filter is found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""
Return a generator of all filter names.
"""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
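# Hedged usage sketch (not part of the original module): looking a filter
# up through the registry functions above. 'keywordcase' is one of the
# builtin names registered in the FILTERS dict at the bottom of this module.
def _example_registry():  # illustration only
    available = list(get_all_filters())
    flt = get_filter_by_name('keywordcase', case='upper')
    return available, flt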
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
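    """Yield (ttype, value) pairs for |value|, re-typing every substring
    matched by |regex| as |specialttype| (optionally transformed through
    |replacefunc|) while passing the surrounding text through unchanged."""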
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""
Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
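# Hedged usage sketch (not part of the original module): attaching the
# filter above to a lexer by its registered name. PythonLexer,
# NullFormatter and highlight are standard pygments API.
def _example_codetagify():  # illustration only
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import NullFormatter
    lexer = PythonLexer()
    lexer.add_filter('codetagify', codetags=['XXX', 'TODO', 'FIXME'])
    # 'TODO' is now emitted as a Comment.Special token
    return highlight(u'# TODO: fix this\npass\n', lexer, NullFormatter())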
class KeywordCaseFilter(Filter):
"""
Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(unicode, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
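# Hedged usage sketch (not part of the original module): uppercasing
# Pascal keywords as suggested in the docstring above. DelphiLexer is
# standard pygments API.
def _example_keywordcase():  # illustration only
    from pygments import highlight
    from pygments.lexers import DelphiLexer
    from pygments.formatters import NullFormatter
    lexer = DelphiLexer()
    lexer.add_filter('keywordcase', case='upper')
    return highlight(u'begin writeln; end.', lexer, NullFormatter())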
class NameHighlightFilter(Filter):
"""
Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""
Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
*New in Pygments 0.8.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
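# Hedged usage sketch (not part of the original module): aborting
# highlighting on the first Error token the lexer produces.
def _example_raiseonerror():  # illustration only
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import NullFormatter
    lexer = PythonLexer()
    lexer.add_filter('raiseonerror', excclass=ErrorToken)
    try:
        return highlight(u'def ?broken', lexer, NullFormatter())
    except ErrorToken:
        return None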
class VisibleWhitespaceFilter(Filter):
"""
Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
*New in Pygments 0.8.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items():
opt = options.get(name, False)
if isinstance(opt, basestring) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' '*(tabsize-1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
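# Hedged usage sketch (not part of the original module): making spaces,
# tabs and newlines visible with the default replacement characters.
def _example_whitespace():  # illustration only
    from pygments import highlight
    from pygments.lexers import TextLexer
    from pygments.formatters import NullFormatter
    lexer = TextLexer()
    lexer.add_filter('whitespace', spaces=True, tabs=True, newlines=True)
    return highlight(u'a b\tc\n', lexer, NullFormatter())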
class GobbleFilter(Filter):
"""
Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
*New in Pygments 1.2.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return '', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = '\n'.join(parts)
if value != '':
yield ttype, value
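# Hedged usage sketch (not part of the original module): stripping a
# fixed four-space indent off every highlighted line.
def _example_gobble():  # illustration only
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import NullFormatter
    lexer = PythonLexer()
    lexer.add_filter('gobble', n=4)
    return highlight(u'    x = 1\n    y = 2\n', lexer, NullFormatter())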
class TokenMergeFilter(Filter):
"""
Merges consecutive tokens with the same token type in the output stream of a
lexer.
*New in Pygments 1.2.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
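# Hedged sketch (not part of the original module): the filter ignores the
# lexer argument, so it can be exercised directly on a token stream.
def _example_tokenmerge():  # illustration only
    from pygments.token import Text
    merged = list(TokenMergeFilter().filter(
        None, iter([(Text, u'a'), (Text, u'b'), (Text, u'c')])))
    return merged  # -> [(Text, u'abc')]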
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
}
| mit |
dancingdan/tensorflow | tensorflow/tools/docs/doc_generator_visitor_test.py | 28 | 7383 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tools.docs.doc_generator_visitor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import generate_lib
class NoDunderVisitor(doc_generator_visitor.DocGeneratorVisitor):
def __call__(self, parent_name, parent, children):
"""Drop all the dunder methods to make testing easier."""
children = [
(name, obj) for (name, obj) in children if not name.startswith('_')
]
super(NoDunderVisitor, self).__call__(parent_name, parent, children)
class DocGeneratorVisitorTest(googletest.TestCase):
def test_call_module(self):
visitor = doc_generator_visitor.DocGeneratorVisitor()
visitor(
'doc_generator_visitor', doc_generator_visitor,
[('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor)])
self.assertEqual({'doc_generator_visitor': ['DocGeneratorVisitor']},
visitor.tree)
self.assertEqual({
'doc_generator_visitor': doc_generator_visitor,
'doc_generator_visitor.DocGeneratorVisitor':
doc_generator_visitor.DocGeneratorVisitor,
}, visitor.index)
def test_call_class(self):
visitor = doc_generator_visitor.DocGeneratorVisitor()
visitor(
'DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor,
[('index', doc_generator_visitor.DocGeneratorVisitor.index)])
self.assertEqual({'DocGeneratorVisitor': ['index']},
visitor.tree)
self.assertEqual({
'DocGeneratorVisitor': doc_generator_visitor.DocGeneratorVisitor,
'DocGeneratorVisitor.index':
doc_generator_visitor.DocGeneratorVisitor.index
}, visitor.index)
def test_call_raises(self):
visitor = doc_generator_visitor.DocGeneratorVisitor()
with self.assertRaises(RuntimeError):
visitor('non_class_or_module', 'non_class_or_module_object', [])
def test_duplicates_module_class_depth(self):
class Parent(object):
class Nested(object):
pass
tf = types.ModuleType('tf')
tf.Parent = Parent
tf.submodule = types.ModuleType('submodule')
tf.submodule.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.submodule.Parent':
sorted([
'tf.Parent',
'tf.submodule.Parent',
]),
'tf.submodule.Parent.Nested':
sorted([
'tf.Parent.Nested',
'tf.submodule.Parent.Nested',
]),
}, visitor.duplicates)
self.assertEqual({
'tf.Parent.Nested': 'tf.submodule.Parent.Nested',
'tf.Parent': 'tf.submodule.Parent',
}, visitor.duplicate_of)
self.assertEqual({
id(Parent): 'tf.submodule.Parent',
id(Parent.Nested): 'tf.submodule.Parent.Nested',
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
}, visitor.reverse_index)
def test_duplicates_contrib(self):
class Parent(object):
pass
tf = types.ModuleType('tf')
tf.contrib = types.ModuleType('contrib')
tf.submodule = types.ModuleType('submodule')
tf.contrib.Parent = Parent
tf.submodule.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.submodule.Parent':
sorted(['tf.contrib.Parent', 'tf.submodule.Parent']),
}, visitor.duplicates)
self.assertEqual({
'tf.contrib.Parent': 'tf.submodule.Parent',
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
id(Parent): 'tf.submodule.Parent',
id(tf.contrib): 'tf.contrib',
}, visitor.reverse_index)
def test_duplicates_defining_class(self):
class Parent(object):
obj1 = object()
class Child(Parent):
pass
tf = types.ModuleType('tf')
tf.Parent = Parent
tf.Child = Child
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.Parent.obj1': sorted([
'tf.Parent.obj1',
'tf.Child.obj1',
]),
}, visitor.duplicates)
self.assertEqual({
'tf.Child.obj1': 'tf.Parent.obj1',
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(Parent): 'tf.Parent',
id(Child): 'tf.Child',
id(Parent.obj1): 'tf.Parent.obj1',
}, visitor.reverse_index)
def test_duplicates_module_depth(self):
class Parent(object):
pass
tf = types.ModuleType('tf')
tf.submodule = types.ModuleType('submodule')
tf.submodule.submodule2 = types.ModuleType('submodule2')
tf.Parent = Parent
tf.submodule.submodule2.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.Parent': sorted(['tf.Parent', 'tf.submodule.submodule2.Parent']),
}, visitor.duplicates)
self.assertEqual({
'tf.submodule.submodule2.Parent': 'tf.Parent'
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
id(tf.submodule.submodule2): 'tf.submodule.submodule2',
id(Parent): 'tf.Parent',
}, visitor.reverse_index)
def test_duplicates_name(self):
class Parent(object):
obj1 = object()
Parent.obj2 = Parent.obj1
tf = types.ModuleType('tf')
tf.submodule = types.ModuleType('submodule')
tf.submodule.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.submodule.Parent.obj1':
sorted([
'tf.submodule.Parent.obj1',
'tf.submodule.Parent.obj2',
]),
}, visitor.duplicates)
self.assertEqual({
'tf.submodule.Parent.obj2': 'tf.submodule.Parent.obj1',
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
id(Parent): 'tf.submodule.Parent',
id(Parent.obj1): 'tf.submodule.Parent.obj1',
}, visitor.reverse_index)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
quattor/aquilon | lib/aquilon/worker/commands/update_vendor.py | 2 | 1119 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aquilon.aqdb.model import Vendor
from aquilon.worker.broker import BrokerCommand
class CommandUpdateVendor(BrokerCommand):
required_parameters = ["vendor"]
def render(self, session, vendor, comments, **_):
dbvendor = Vendor.get_unique(session, vendor, compel=True)
if comments is not None:
dbvendor.comments = comments
session.flush()
return
| apache-2.0 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/eugene/estimation/HLCM_specification.py | 2 | 3884 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
#######
# From the command line, you can run the estimation using this command:
#
# python urbansim/tools/start_estimation.py -c eugene.configs.baseline_estimation --model=household_location_choice_model -s eugene.estimation.HLCM_specification
#
# see
# python urbansim/tools/start_estimation.py --help
# for other options
#######
specification = {}
#
# ############################# Residential ##############################
#
specification = {
-2: #submodel_id
[
# "lnhousing_cost = ln(urbansim.gridcell.housing_cost)",
"urbansim.household_x_gridcell.cost_to_income_ratio",
# "urbansim.household_x_gridcell.housing_affordability",
"urbansim.household_x_gridcell.income_and_year_built",
# "urbansim.household_x_gridcell.income_and_ln_residential_units",
# "urbansim.household_x_gridcell.income_and_percent_residential",
"urbansim.household_x_gridcell.percent_high_income_households_within_walking_distance_if_low_income",
"urbansim.household_x_gridcell.percent_low_income_households_within_walking_distance_if_high_income",
"urbansim.household_x_gridcell.young_household_in_high_density_residential",
"urbansim.household_x_gridcell.young_household_in_mixed_use",
# "urbansim.gridcell.is_near_highway",
#"urbansim.gridcell.is_near_arterial",
# "ln_bounded(urbansim.household_x_gridcell.income_less_housing_cost)",
# "urbansim.gridcell.travel_time_to_CBD",
# "urbansim.gridcell.acres_open_space_within_walking_distance",
# "urbansim.household_x_gridcell.income_and_ln_improvement_value_per_unit",
# "ltotal_resvalue_per_ru_wwd = ln(urbansim.gridcell.total_residential_value_per_residential_unit_within_walking_distance)",
# "ltotal_impvalue_per_ru_wwd = ln(urbansim.gridcell.total_improvement_value_per_residential_unit_within_walking_distance)",
# "urbansim.gridcell.ln_residential_units",
# "urbansim.gridcell.ln_residential_units_within_walking_distance",\
#"urbansim.gridcell.ln_service_sector_employment_within_walking_distance",\
#"urbansim.gridcell.ln_basic_sector_employment_within_walking_distance",\
#"urbansim.gridcell.ln_retail_sector_employment_within_walking_distance",
#"urbansim.household_x_gridcell.percent_high_income_households_within_walking_distance_if_high_income",\
#"urbansim.household_x_gridcell.percent_low_income_households_within_walking_distance_if_low_income",\
#"urbansim.household_x_gridcell.percent_mid_income_households_within_walking_distance_if_mid_income", \
"urbansim.household_x_gridcell.percent_minority_households_within_walking_distance_if_minority",\
#"urbansim.household_x_gridcell.percent_minority_households_within_walking_distance_if_not_minority",\
"urbansim.household_x_gridcell.residential_units_when_household_has_children",\
# "urbansim.household_x_gridcell.utility_for_transit_walk_0_cars",
# "urbansim.gridcell.ln_home_access_to_employment_1",\
# "urbansim.household_x_gridcell.same_household_age_in_faz",
#"utility_for_transit_walk_0_cars",\
#"utility_for_transit_walk_1_person"
#"ln_access_from_residence_to_workplaces",\
#"trip_weighted_average_utility_hbw_from_home_am_income_1",\
#"trip_weighted_average_utility_hbw_from_home_am_income_2",\
#"trip_weighted_average_utility_hbw_from_home_am_income_3",\
#"trip_weighted_average_utility_hbw_from_home_am_income_4"\
]
} | gpl-2.0 |
traveloka/ansible | lib/ansible/modules/packaging/language/cpanm.py | 23 | 7010 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Franck Cuny <franck@lumberjaph.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cpanm
short_description: Manages Perl library dependencies.
description:
- Manage Perl library dependencies.
version_added: "1.6"
options:
name:
description:
- The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
required: false
default: null
aliases: ["pkg"]
from_path:
description:
- The local directory from where to install
required: false
default: null
notest:
description:
- Do not run unit tests
required: false
default: false
locallib:
description:
- Specify the install base to install modules
required: false
default: false
mirror:
description:
- Specifies the base URL for the CPAN mirror to use
required: false
default: false
mirror_only:
description:
- Use the mirror's index file instead of the CPAN Meta DB
required: false
default: false
installdeps:
description:
- Only install dependencies
required: false
default: false
version_added: "2.0"
version:
description:
- minimum version of perl module to consider acceptable
required: false
default: false
version_added: "2.1"
system_lib:
description:
- Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
- This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation.
required: false
default: false
version_added: "2.0"
aliases: ['use_sudo']
executable:
description:
- Override the path to the cpanm executable
required: false
default: null
version_added: "2.1"
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
author: "Franck Cuny (@franckcuny)"
'''
EXAMPLES = '''
# install Dancer perl package
- cpanm:
name: Dancer
# install version 0.99_05 of the Plack perl package
- cpanm:
name: MIYAGAWA/Plack-0.99_05.tar.gz
# install Dancer into the specified locallib
- cpanm:
name: Dancer
locallib: /srv/webapps/my_app/extlib
# install perl dependencies from local directory
- cpanm:
from_path: /srv/webapps/my_app/src/
# install Dancer perl package without running the unit tests in indicated locallib
- cpanm:
name: Dancer
notest: True
locallib: /srv/webapps/my_app/extlib
# install Dancer perl package from a specific mirror
- cpanm:
name: Dancer
mirror: 'http://cpan.cpantesters.org/'
# install Dancer perl package into the system root path
- cpanm:
name: Dancer
system_lib: yes
# install Dancer if it's not already installed
# OR the installed version is older than version 1.0
- cpanm:
name: Dancer
version: '1.0'
'''
def _is_package_installed(module, name, locallib, cpanm, version):
cmd = ""
if locallib:
os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
cmd = "%s perl -e ' use %s" % (cmd, name)
if version:
cmd = "%s %s;'" % (cmd, version)
else:
cmd = "%s;'" % cmd
res, stdout, stderr = module.run_command(cmd, check_rc=False)
    return res == 0
def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
    # this code should use "%s" like everything else and just return early, but we're not fixing all of it now.
# don't copy stuff like this
if from_path:
cmd = cpanm + " " + from_path
else:
cmd = cpanm + " " + name
if notest is True:
cmd = cmd + " -n"
if locallib is not None:
cmd = cmd + " -l " + locallib
if mirror is not None:
cmd = cmd + " --mirror " + mirror
if mirror_only is True:
cmd = cmd + " --mirror-only"
if installdeps is True:
cmd = cmd + " --installdeps"
if use_sudo is True:
cmd = cmd + " --sudo"
return cmd
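# Hedged illustration (not part of the original module): for
# name='Dancer', notest=True, locallib='/srv/extlib' and the remaining
# flags off, the helper above returns
#   "/usr/bin/cpanm Dancer -n -l /srv/extlib"
# (assuming cpanm resolves to /usr/bin/cpanm), which main() then hands to
# module.run_command().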
def _get_cpanm_path(module):
if module.params['executable']:
return module.params['executable']
else:
return module.get_bin_path('cpanm', True)
def main():
arg_spec = dict(
name=dict(default=None, required=False, aliases=['pkg']),
from_path=dict(default=None, required=False, type='path'),
notest=dict(default=False, type='bool'),
locallib=dict(default=None, required=False, type='path'),
mirror=dict(default=None, required=False),
mirror_only=dict(default=False, type='bool'),
installdeps=dict(default=False, type='bool'),
system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
version=dict(default=None, required=False),
executable=dict(required=False, type='path'),
)
module = AnsibleModule(
argument_spec=arg_spec,
required_one_of=[['name', 'from_path']],
)
cpanm = _get_cpanm_path(module)
name = module.params['name']
from_path = module.params['from_path']
notest = module.boolean(module.params.get('notest', False))
locallib = module.params['locallib']
mirror = module.params['mirror']
mirror_only = module.params['mirror_only']
installdeps = module.params['installdeps']
use_sudo = module.params['system_lib']
version = module.params['version']
changed = False
installed = _is_package_installed(module, name, locallib, cpanm, version)
if not installed:
cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
if rc_cpanm != 0:
module.fail_json(msg=err_cpanm, cmd=cmd)
if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1):
changed = True
module.exit_json(changed=changed, binary=cpanm, name=name)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/google/appengine/_internal/antlr3/extras.py | 23 | 1940 | """ @package antlr3.extras
@brief ANTLR3 runtime package, extras module
This module exposes extra runtime helpers: TreeWizard and the optional DOT tree generator.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lots of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from treewizard import TreeWizard
try:
from google.appengine._internal.antlr3.dottreegen import toDOT
except ImportError, exc:
def toDOT(*args, **kwargs):
raise exc
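# The fallback above captures the original ImportError and re-raises it
# lazily, so this module stays importable even when dottreegen (or its
# dependencies) is unavailable; only an actual call to toDOT() fails.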
| mit |
adykstra/mne-python | mne/beamformer/tests/test_dics.py | 1 | 26564 | # Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Britta Westner <britta.wstnr@gmail.com>
#
# License: BSD 3 clause
import copy as cp
import os.path as op
import pytest
from numpy.testing import assert_array_equal, assert_allclose
import numpy as np
import mne
from mne.datasets import testing
from mne.beamformer import (make_dics, apply_dics, apply_dics_epochs,
apply_dics_csd, tf_dics, read_beamformer,
Beamformer)
from mne.time_frequency import csd_morlet
from mne.utils import run_tests_if_main, object_diff, requires_h5py
from mne.proj import compute_proj_evoked, make_projector
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
fname_label = op.join(subjects_dir, 'sample', 'label', 'aparc',
'rostralmiddlefrontal-lh.label')
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def _load_forward():
"""Load forward models."""
fwd_free = mne.read_forward_solution(fname_fwd)
fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False)
fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False)
fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True,
use_cps=False)
fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True,
use_cps=False)
fwd_vol = mne.read_forward_solution(fname_fwd_vol)
label = mne.read_label(fname_label)
return fwd_free, fwd_surf, fwd_fixed, fwd_vol, label
def _simulate_data(fwd):
"""Simulate an oscillator on the cortex."""
source_vertno = 146374 # Somewhere on the frontal lobe
sfreq = 50. # Hz.
times = np.arange(10 * sfreq) / sfreq # 10 seconds of data
signal = np.sin(20 * 2 * np.pi * times) # 20 Hz oscillator
signal[:len(times) // 2] *= 2 # Make signal louder at the beginning
signal *= 1e-9 # Scale to be in the ballpark of MEG data
# Construct a SourceEstimate object that describes the signal at the
# cortical level.
stc = mne.SourceEstimate(
signal[np.newaxis, :],
vertices=[[source_vertno], []],
tmin=0,
tstep=1 / sfreq,
subject='sample',
)
# Create an info object that holds information about the sensors
info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
info.update(fwd['info']) # Merge in sensor position information
# heavily decimate sensors to make it much faster
info = mne.pick_info(info, np.arange(info['nchan'])[::5])
fwd = mne.pick_channels_forward(fwd, info['ch_names'])
# Run the simulated signal through the forward model, obtaining
# simulated sensor data.
raw = mne.apply_forward_raw(fwd, stc, info)
# Add a little noise
random = np.random.RandomState(42)
noise = random.randn(*raw._data.shape) * 1e-14
raw._data += noise
# Define a single epoch
epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
tmax=raw.times[-1], preload=True)
evoked = epochs.average()
# Compute the cross-spectral density matrix
csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)
return epochs, evoked, csd, source_vertno
def _test_weight_norm(filters, norm=1):
"""Test weight normalization."""
for ws in filters['weights']:
ws = ws.reshape(-1, filters['n_orient'], ws.shape[1])
for w in ws:
assert_allclose(np.trace(w.dot(w.T)), norm)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_h5py
def test_make_dics(tmpdir, _load_forward):
"""Test making DICS beamformer filters."""
# We only test proper handling of parameters here. Testing the results is
# done in test_apply_dics_timeseries and test_apply_dics_csd.
fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward
epochs, _, csd, _ = _simulate_data(fwd_fixed)
with pytest.raises(RuntimeError, match='several sensor types'):
make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None)
epochs.pick_types(meg='grad')
with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"):
make_dics(epochs.info, fwd_fixed, csd, pick_ori="notexistent")
with pytest.raises(ValueError, match='rank, if str'):
make_dics(epochs.info, fwd_fixed, csd, rank='foo')
with pytest.raises(TypeError, match='rank must be'):
make_dics(epochs.info, fwd_fixed, csd, rank=1.)
# Test if fixed forward operator is detected when picking normal
# orientation
with pytest.raises(ValueError, match='forward operator with free ori'):
make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
with pytest.raises(ValueError, match='oriented in surface coordinates'):
make_dics(epochs.info, fwd_free, csd, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
with pytest.raises(ValueError, match='oriented in surface coordinates'):
make_dics(epochs.info, fwd_vol, csd, pick_ori="normal")
# Test invalid combinations of parameters
with pytest.raises(NotImplementedError, match='implemented with pick_ori'):
make_dics(epochs.info, fwd_free, csd, reduce_rank=True, pick_ori=None)
with pytest.raises(NotImplementedError, match='implemented with pick_ori'):
make_dics(epochs.info, fwd_free, csd, reduce_rank=True,
pick_ori='max-power', inversion='single')
with pytest.raises(ValueError, match='not stable with depth'):
make_dics(epochs.info, fwd_free, csd, weight_norm='unit-noise-gain',
inversion='single', normalize_fwd=True)
# Sanity checks on the returned filters
n_freq = len(csd.frequencies)
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
n_verts = len(vertices)
n_orient = 3
n_channels = len(epochs.ch_names)
# Test return values
filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
weight_norm='unit-noise-gain', normalize_fwd=False)
assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
assert np.iscomplexobj(filters['weights'])
assert filters['csd'] == csd
assert filters['ch_names'] == epochs.ch_names
assert_array_equal(filters['proj'], np.eye(n_channels))
assert_array_equal(filters['vertices'][0], vertices)
assert_array_equal(filters['vertices'][1], []) # Label was on the LH
assert filters['subject'] == fwd_free['src'][0]['subject_his_id']
assert filters['pick_ori'] is None
assert filters['n_orient'] == n_orient
assert filters['inversion'] == 'single'
assert not filters['normalize_fwd']
assert filters['weight_norm'] == 'unit-noise-gain'
assert 'DICS' in repr(filters)
assert 'subject "sample"' in repr(filters)
assert '13' in repr(filters)
assert str(n_channels) in repr(filters)
assert 'rank' not in repr(filters)
_test_weight_norm(filters)
    # Test picking orientations. Also test weight normalization under these
    # different conditions.
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
pick_ori='normal', weight_norm='unit-noise-gain',
normalize_fwd=False)
n_orient = 1
assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
assert filters['n_orient'] == n_orient
_test_weight_norm(filters)
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
pick_ori='max-power', weight_norm='unit-noise-gain',
normalize_fwd=False)
n_orient = 1
assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
assert filters['n_orient'] == n_orient
_test_weight_norm(filters)
# From here on, only work on a single frequency
csd = csd[0]
# Test using a real-valued filter
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
pick_ori='normal', real_filter=True)
assert not np.iscomplexobj(filters['weights'])
# Test forward normalization. When inversion='single', the power of a
# unit-noise CSD should be 1, even without weight normalization.
csd_noise = csd.copy()
inds = np.triu_indices(csd.n_channels)
# Using [:, :] syntax for in-place broadcasting
csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis]
filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label,
weight_norm=None, normalize_fwd=True)
w = filters['weights'][0][:3]
assert_allclose(np.diag(w.dot(w.T)), 1.0, rtol=1e-6, atol=0)
# Test turning off both forward and weight normalization
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
weight_norm=None, normalize_fwd=False)
w = filters['weights'][0][:3]
assert not np.allclose(np.diag(w.dot(w.T)), 1.0, rtol=1e-2, atol=0)
# Test neural-activity-index weight normalization. It should be a scaled
# version of the unit-noise-gain beamformer.
filters_nai = make_dics(
epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
weight_norm='nai', normalize_fwd=False)
w_nai = filters_nai['weights'][0]
filters_ung = make_dics(
epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
weight_norm='unit-noise-gain', normalize_fwd=False)
w_ung = filters_ung['weights'][0]
assert np.allclose(np.corrcoef(np.abs(w_nai).ravel(),
np.abs(w_ung).ravel()), 1)
# Test whether spatial filter contains src_type
assert 'src_type' in filters
fname = op.join(str(tmpdir), 'filters-dics.h5')
filters.save(fname)
filters_read = read_beamformer(fname)
assert isinstance(filters, Beamformer)
assert isinstance(filters_read, Beamformer)
for key in ['tmin', 'tmax']: # deal with strictness of object_diff
setattr(filters['csd'], key, np.float(getattr(filters['csd'], key)))
assert object_diff(filters, filters_read) == ''
def test_apply_dics_csd(_load_forward):
"""Test applying a DICS beamformer to a CSD matrix."""
fwd_free, fwd_surf, fwd_fixed, _, label = _load_forward
epochs, _, csd, source_vertno = _simulate_data(fwd_fixed)
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
source_ind = vertices.tolist().index(source_vertno)
reg = 1 # Lots of regularization for our toy dataset
with pytest.raises(RuntimeError, match='several sensor types'):
make_dics(epochs.info, fwd_free, csd)
epochs.pick_types(meg='grad')
# Try different types of forward models
assert label.hemi == 'lh'
assert vertices[source_ind] == source_vertno
rr_want = fwd_free['src'][0]['rr'][source_vertno]
for fwd in [fwd_free, fwd_surf, fwd_fixed]:
filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg,
inversion='single')
power, f = apply_dics_csd(csd, filters)
assert f == [10, 20]
# Did we find the true source at 20 Hz?
idx = np.argmax(power.data[:, 1])
rr_got = fwd_free['src'][0]['rr'][vertices[idx]]
dist = np.linalg.norm(rr_got - rr_want)
assert dist == 0.
# Is the signal stronger at 20 Hz than 10?
assert power.data[source_ind, 1] > power.data[source_ind, 0]
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power'])
@pytest.mark.parametrize('inversion', ['single', 'matrix'])
def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion):
"""Testpicking different orientations and inversion modes."""
fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward
epochs, _, csd, source_vertno = _simulate_data(fwd_fixed)
epochs.pick_types('grad')
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
source_ind = vertices.tolist().index(source_vertno)
rr_want = fwd_free['src'][0]['rr'][source_vertno]
reg_ = 5 if inversion == 'matrix' else 1
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
reg=reg_, pick_ori=pick_ori,
inversion=inversion, normalize_fwd=False,
weight_norm='unit-noise-gain')
power, f = apply_dics_csd(csd, filters)
assert f == [10, 20]
idx = np.argmax(power.data[:, 1])
rr_got = fwd_free['src'][0]['rr'][vertices[idx]]
dist = np.linalg.norm(rr_got - rr_want)
assert dist <= (0.03 if inversion == 'matrix' else 0.)
assert power.data[source_ind, 1] > power.data[source_ind, 0]
# Test unit-noise-gain weighting
csd_noise = csd.copy()
inds = np.triu_indices(csd.n_channels)
csd_noise._data[...] = np.eye(csd.n_channels)[inds][:, np.newaxis]
noise_power, f = apply_dics_csd(csd_noise, filters)
assert np.allclose(noise_power.data, 1)
# Test filter with forward normalization instead of weight
# normalization
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
reg=reg_, pick_ori=pick_ori,
inversion=inversion, weight_norm=None,
normalize_fwd=True)
power, f = apply_dics_csd(csd, filters)
assert f == [10, 20]
idx = np.argmax(power.data[:, 1])
rr_got = fwd_free['src'][0]['rr'][vertices[idx]]
dist = np.linalg.norm(rr_got - rr_want)
assert dist <= (0.035 if inversion == 'matrix' else 0.)
assert power.data[source_ind, 1] > power.data[source_ind, 0]
def test_real(_load_forward):
"""Test using a real-valued filter."""
fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward
epochs, _, csd, source_vertno = _simulate_data(fwd_fixed)
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
source_ind = vertices.tolist().index(source_vertno)
rr_want = fwd_free['src'][0]['rr'][source_vertno]
epochs.pick_types('grad')
reg = 1 # Lots of regularization for our toy dataset
filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg,
real_filter=True)
    # Also test here that no warnings are thrown - implemented to check that
    # no "src should not be None" warning occurs:
with pytest.warns(None) as w:
power, f = apply_dics_csd(csd, filters_real)
assert len(w) == 0
assert f == [10, 20]
assert np.argmax(power.data[:, 1]) == source_ind
assert power.data[source_ind, 1] > power.data[source_ind, 0]
# Test rank reduction
filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5,
pick_ori='max-power', inversion='matrix',
reduce_rank=True)
power, f = apply_dics_csd(csd, filters_real)
assert f == [10, 20]
idx = np.argmax(power.data[:, 1])
rr_got = fwd_free['src'][0]['rr'][vertices[idx]]
dist = np.linalg.norm(rr_got - rr_want)
assert dist <= 0.02
assert power.data[source_ind, 1] > power.data[source_ind, 0]
# Test computing source power on a volume source space
filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg)
power, f = apply_dics_csd(csd, filters_vol)
    vol_source_ind = 3851  # FIXME: don't hardcode this
assert f == [10, 20]
assert np.argmax(power.data[:, 1]) == vol_source_ind
assert power.data[vol_source_ind, 1] > power.data[vol_source_ind, 0]
# check whether a filters object without src_type throws expected warning
del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning
with pytest.warns(RuntimeWarning, match='spatial filter does not contain '
'src_type'):
apply_dics_csd(csd, filters_vol)
@pytest.mark.filterwarnings("ignore:The use of several sensor types with the"
":RuntimeWarning")
def test_apply_dics_timeseries(_load_forward):
"""Test DICS applied to timeseries data."""
fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward
epochs, evoked, csd, source_vertno = _simulate_data(fwd_fixed)
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
source_ind = vertices.tolist().index(source_vertno)
reg = 5 # Lots of regularization for our toy dataset
with pytest.raises(RuntimeError, match='several sensor types'):
make_dics(evoked.info, fwd_surf, csd)
evoked.pick_types(meg='grad')
multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label,
reg=reg)
# Sanity checks on the resulting STC after applying DICS on evoked
stcs = apply_dics(evoked, multiple_filters)
assert isinstance(stcs, list)
assert len(stcs) == len(multiple_filters['weights'])
assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0])
assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1])
assert_allclose(stcs[0].times, evoked.times)
# Applying filters for multiple frequencies on epoch data should fail
with pytest.raises(ValueError, match='computed for a single frequency'):
apply_dics_epochs(epochs, multiple_filters)
# From now on, only apply filters with a single frequency (20 Hz).
csd20 = csd.pick_frequency(20)
filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg)
# Sanity checks on the resulting STC after applying DICS on epochs.
    # Also test here that no warnings are thrown - implemented to check that
    # no "src should not be None" warning occurs
with pytest.warns(None) as w:
stcs = apply_dics_epochs(epochs, filters)
assert len(w) == 0
assert isinstance(stcs, list)
assert len(stcs) == 1
assert_array_equal(stcs[0].vertices[0], filters['vertices'][0])
assert_array_equal(stcs[0].vertices[1], filters['vertices'][1])
assert_allclose(stcs[0].times, epochs.times)
# Did we find the source?
stc = (stcs[0] ** 2).mean()
assert np.argmax(stc.data) == source_ind
# Apply filters to evoked
stc = apply_dics(evoked, filters)
stc = (stc ** 2).mean()
assert np.argmax(stc.data) == source_ind
# Test if wrong channel selection is detected in application of filter
evoked_ch = cp.deepcopy(evoked)
evoked_ch.pick_channels(evoked_ch.ch_names[:-1])
with pytest.raises(ValueError, match='MEG 2633 which is not present'):
apply_dics(evoked_ch, filters)
# Test whether projections are applied, by adding a custom projection
filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label)
stc_noproj = apply_dics(evoked, filters_noproj)
evoked_proj = evoked.copy()
p = compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0)
proj_matrix = make_projector(p, evoked_proj.ch_names)[0]
evoked_proj.info['projs'] += p
filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label)
assert_array_equal(filters_proj['proj'], proj_matrix)
stc_proj = apply_dics(evoked_proj, filters_proj)
assert np.any(np.not_equal(stc_noproj.data, stc_proj.data))
# Test detecting incompatible projections
filters_proj['proj'] = filters_proj['proj'][:-1, :-1]
with pytest.raises(ValueError, match='operands could not be broadcast'):
apply_dics(evoked_proj, filters_proj)
# Test returning a generator
stcs = apply_dics_epochs(epochs, filters, return_generator=False)
stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True)
assert_array_equal(stcs[0].data, next(stcs_gen).data)
# Test computing timecourses on a volume source space
filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg)
stc = apply_dics(evoked, filters_vol)
stc = (stc ** 2).mean()
assert np.argmax(stc.data) == 3851 # TODO: don't make this hard coded
# check whether a filters object without src_type throws expected warning
del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning
with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'):
apply_dics_epochs(epochs, filters_vol)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_tf_dics(_load_forward):
"""Test 5D time-frequency beamforming based on DICS."""
fwd_free, fwd_surf, fwd_fixed, _, label = _load_forward
epochs, _, _, source_vertno = _simulate_data(fwd_fixed)
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
source_ind = vertices.tolist().index(source_vertno)
reg = 1 # Lots of regularization for our toy dataset
tmin = 0
tmax = 9
tstep = 4
win_lengths = [5, 5]
frequencies = [10, 20]
freq_bins = [(8, 12), (18, 22)]
with pytest.raises(RuntimeError, match='several sensor types'):
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
freq_bins=freq_bins, frequencies=frequencies,
decim=10, reg=reg, label=label)
epochs.pick_types(meg='grad')
# Compute DICS for two time windows and two frequencies
for mode in ['fourier', 'multitaper', 'cwt_morlet']:
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode=mode, freq_bins=freq_bins, frequencies=frequencies,
decim=10, reg=reg, label=label)
# Did we find the true source at 20 Hz?
assert np.argmax(stcs[1].data[:, 0]) == source_ind
assert np.argmax(stcs[1].data[:, 1]) == source_ind
# 20 Hz power should decrease over time
assert stcs[1].data[source_ind, 0] > stcs[1].data[source_ind, 1]
# 20 Hz power should be more than 10 Hz power at the true source
assert stcs[1].data[source_ind, 0] > stcs[0].data[source_ind, 0]
# Manually compute source power and compare with the last tf_dics result.
source_power = []
time_windows = [(0, 5), (4, 9)]
for time_window in time_windows:
csd = csd_morlet(epochs, frequencies=[frequencies[1]],
tmin=time_window[0], tmax=time_window[1], decim=10)
csd = csd.sum()
csd._data /= csd.n_fft
filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label)
stc_source_power, _ = apply_dics_csd(csd, filters)
source_power.append(stc_source_power.data)
# Comparing tf_dics results with dics_source_power results
assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0)
# Test using noise csds. We're going to use identity matrices. That way,
# since we're using unit-noise-gain weight normalization, there should be
# no effect.
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='cwt_morlet', frequencies=frequencies, decim=10,
reg=reg, label=label, normalize_fwd=False,
weight_norm='unit-noise-gain')
noise_csd = csd.copy()
inds = np.triu_indices(csd.n_channels)
# Using [:, :] syntax for in-place broadcasting
noise_csd._data[:, :] = 2 * np.eye(csd.n_channels)[inds][:, np.newaxis]
noise_csd.n_fft = 2 # Dividing by n_fft should yield an identity CSD
noise_csds = [noise_csd, noise_csd] # Two frequency bins
stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep,
win_lengths, mode='cwt_morlet',
frequencies=frequencies, decim=10, reg=reg,
label=label, normalize_fwd=False,
weight_norm='unit-noise-gain')
assert_allclose(stcs_norm[0].data, stcs[0].data, atol=0)
assert_allclose(stcs_norm[1].data, stcs[1].data, atol=0)
# Test invalid parameter combinations
with pytest.raises(ValueError, match='fourier.*freq_bins" parameter'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='fourier', freq_bins=None)
with pytest.raises(ValueError, match='cwt_morlet.*frequencies" param'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='cwt_morlet', frequencies=None)
# Test if incorrect number of noise CSDs is detected
with pytest.raises(ValueError, match='One noise CSD object expected per'):
tf_dics(epochs, fwd_surf, [noise_csds[0]], tmin, tmax, tstep,
win_lengths, freq_bins=freq_bins)
# Test if freq_bins and win_lengths incompatibility is detected
with pytest.raises(ValueError, match='One time window length expected'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths=[0, 1, 2], freq_bins=freq_bins)
# Test if time step exceeding window lengths is detected
with pytest.raises(ValueError, match='Time step should not be larger'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep=0.15,
win_lengths=[0.2, 0.1], freq_bins=freq_bins)
    # Test if incorrect number of n_ffts is detected
with pytest.raises(ValueError, match='When specifying number of FFT'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths, freq_bins=freq_bins, n_ffts=[1])
# Test if incorrect number of mt_bandwidths is detected
with pytest.raises(ValueError, match='When using multitaper mode and'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths=win_lengths, freq_bins=freq_bins,
mode='multitaper', mt_bandwidths=[20])
# Test if subtracting evoked responses yields NaN's, since we only have one
# epoch. Suppress division warnings.
assert len(epochs) == 1, len(epochs)
with np.errstate(invalid='ignore'):
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='cwt_morlet', frequencies=frequencies,
subtract_evoked=True, reg=reg, label=label, decim=20)
assert np.all(np.isnan(stcs[0].data))
run_tests_if_main()
| bsd-3-clause |
seem-sky/kbengine | kbe/src/lib/python/Lib/test/test_zipfile64.py | 72 | 4274 | # Tests of the full ZIP64 functionality of zipfile
# The support.requires call is the only reason for keeping this separate
# from test_zipfile
from test import support
# XXX(nnorwitz): disable this test by looking for the extra largefile resource,
# which doesn't exist. This test takes over 30 minutes to run in general
# and requires more disk space than most of the buildbots have.
support.requires(
'extralargefile',
'test requires loads of disk-space bytes and a long time to run'
)
import zipfile, os, unittest
import time
import sys
from io import StringIO
from tempfile import TemporaryFile
from test.support import TESTFN, run_unittest, requires_zlib
TESTFN2 = TESTFN + "2"
# How much time in seconds can pass before we print a 'Still working' message.
_PRINT_WORKING_MSG_INTERVAL = 5 * 60
class TestsWithSourceFile(unittest.TestCase):
def setUp(self):
# Create test data.
line_gen = ("Test of zipfile line %d." % i for i in range(1000000))
self.data = '\n'.join(line_gen).encode('ascii')
# And write it to a file.
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def zipTest(self, f, compression):
# Create the ZIP archive.
zipfp = zipfile.ZipFile(f, "w", compression)
# It will contain enough copies of self.data to reach about 6GB of
# raw data to store.
filecount = 6*1024**3 // len(self.data)
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
for num in range(filecount):
zipfp.writestr("testfn%d" % num, self.data)
# Print still working message since this test can be really slow
if next_time <= time.time():
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
print((
' zipTest still writing %d of %d, be patient...' %
(num, filecount)), file=sys.__stdout__)
sys.__stdout__.flush()
zipfp.close()
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
for num in range(filecount):
self.assertEqual(zipfp.read("testfn%d" % num), self.data)
# Print still working message since this test can be really slow
if next_time <= time.time():
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
print((
' zipTest still reading %d of %d, be patient...' %
(num, filecount)), file=sys.__stdout__)
sys.__stdout__.flush()
zipfp.close()
def testStored(self):
# Try the temp file first. If we do TESTFN2 first, then it hogs
# gigabytes of disk space for the duration of the test.
for f in TemporaryFile(), TESTFN2:
self.zipTest(f, zipfile.ZIP_STORED)
@requires_zlib
def testDeflated(self):
# Try the temp file first. If we do TESTFN2 first, then it hogs
# gigabytes of disk space for the duration of the test.
for f in TemporaryFile(), TESTFN2:
self.zipTest(f, zipfile.ZIP_DEFLATED)
def tearDown(self):
for fname in TESTFN, TESTFN2:
if os.path.exists(fname):
os.remove(fname)
class OtherTests(unittest.TestCase):
def testMoreThan64kFiles(self):
# This test checks that more than 64k files can be added to an archive,
# and that the resulting archive can be read properly by ZipFile
zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False)
zipf.debug = 100
numfiles = (1 << 16) * 3//2
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(len(zipf2.namelist()), numfiles)
for i in range(numfiles):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
zipf.close()
def tearDown(self):
support.unlink(TESTFN)
support.unlink(TESTFN2)
def test_main():
run_unittest(TestsWithSourceFile, OtherTests)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
saleemjaveds/https-github.com-openstack-nova | nova/tests/api/openstack/compute/contrib/test_server_external_events.py | 12 | 6651 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute.contrib import server_external_events
from nova import context
from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova import test
fake_instances = {
'00000000-0000-0000-0000-000000000001': objects.Instance(
uuid='00000000-0000-0000-0000-000000000001', host='host1'),
'00000000-0000-0000-0000-000000000002': objects.Instance(
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': objects.Instance(
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
'00000000-0000-0000-0000-000000000004': objects.Instance(
uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
@classmethod
def fake_get_by_uuid(cls, context, uuid):
try:
return fake_instances[uuid]
except KeyError:
raise exception.InstanceNotFound(instance_id=uuid)
@mock.patch('nova.objects.instance.Instance.get_by_uuid', fake_get_by_uuid)
class ServerExternalEventsTest(test.NoDBTestCase):
def setUp(self):
super(ServerExternalEventsTest, self).setUp()
self.api = server_external_events.ServerExternalEventsController()
self.context = context.get_admin_context()
self.event_1 = {'name': 'network-vif-plugged',
'tag': 'foo',
'server_uuid': fake_instance_uuids[0]}
self.event_2 = {'name': 'network-changed',
'server_uuid': fake_instance_uuids[1]}
self.default_body = {'events': [self.event_1, self.event_2]}
self.resp_event_1 = dict(self.event_1)
self.resp_event_1['code'] = 200
self.resp_event_1['status'] = 'completed'
self.resp_event_2 = dict(self.event_2)
self.resp_event_2['code'] = 200
self.resp_event_2['status'] = 'completed'
self.default_resp_body = {'events': [self.resp_event_1,
self.resp_event_2]}
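        # Contract exercised by the tests below: each posted event comes back
        # with a per-event 'code' and 'status'; the response as a whole is
        # 200 when every event succeeds and 207 (multi-status) on partial
        # failure.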
def _create_req(self, body):
req = webob.Request.blank('/v2/fake/os-server-external-events')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
req.body = jsonutils.dumps(body)
return req
def _assert_call(self, req, body, expected_uuids, expected_events):
with mock.patch.object(self.api.compute_api,
'external_instance_event') as api_method:
response = self.api.create(req, body)
result = response.obj
code = response._code
self.assertEqual(1, api_method.call_count)
for inst in api_method.call_args_list[0][0][1]:
expected_uuids.remove(inst.uuid)
self.assertEqual([], expected_uuids)
for event in api_method.call_args_list[0][0][2]:
expected_events.remove(event.name)
self.assertEqual([], expected_events)
return result, code
def test_create(self):
req = self._create_req(self.default_body)
result, code = self._assert_call(req, self.default_body,
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
body = self.default_body
body['events'][1]['server_uuid'] = MISSING_UUID
req = self._create_req(body)
result, code = self._assert_call(req, body, [fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_event_instance_has_no_host(self):
body = self.default_body
body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
req = self._create_req(body)
# the instance without host should not be passed to the compute layer
result, code = self._assert_call(req, body,
[fake_instance_uuids[1]],
['network-changed'])
self.assertEqual(422, result['events'][0]['code'])
self.assertEqual('failed', result['events'][0]['status'])
self.assertEqual(200, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
body['events'][1]['server_uuid'] = MISSING_UUID
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.api.create, req, body)
def test_create_bad_status(self):
body = self.default_body
body['events'][1]['status'] = 'foo'
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
def test_create_extra_gorp(self):
body = self.default_body
body['events'][0]['foobar'] = 'bad stuff'
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
def test_create_bad_events(self):
body = {'events': 'foo'}
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
def test_create_bad_body(self):
body = {'foo': 'bar'}
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
| apache-2.0 |
phalax4/CarnotKE | jyhton/lib-python/2.7/unittest/test/test_skipping.py | 70 | 5731 | import unittest
from .support import LoggingResult
class Test_TestSkipping(unittest.TestCase):
def test_skipping(self):
class Foo(unittest.TestCase):
def test_skip_me(self):
self.skipTest("skip")
events = []
result = LoggingResult(events)
test = Foo("test_skip_me")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "skip")])
# Try letting setUp skip the test now.
class Foo(unittest.TestCase):
def setUp(self):
self.skipTest("testing")
def test_nothing(self): pass
events = []
result = LoggingResult(events)
test = Foo("test_nothing")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(result.testsRun, 1)
def test_skipping_decorators(self):
op_table = ((unittest.skipUnless, False, True),
(unittest.skipIf, True, False))
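        # Each row is (decorator, condition that skips, condition that runs):
        # skipUnless skips on a false condition, skipIf on a true one.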
for deco, do_skip, dont_skip in op_table:
class Foo(unittest.TestCase):
@deco(do_skip, "testing")
def test_skip(self): pass
@deco(dont_skip, "testing")
def test_dont_skip(self): pass
test_do_skip = Foo("test_skip")
test_dont_skip = Foo("test_dont_skip")
suite = unittest.TestSuite([test_do_skip, test_dont_skip])
events = []
result = LoggingResult(events)
suite.run(result)
self.assertEqual(len(result.skipped), 1)
expected = ['startTest', 'addSkip', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
self.assertEqual(events, expected)
self.assertEqual(result.testsRun, 2)
self.assertEqual(result.skipped, [(test_do_skip, "testing")])
self.assertTrue(result.wasSuccessful())
def test_skip_class(self):
@unittest.skip("testing")
class Foo(unittest.TestCase):
def test_1(self):
record.append(1)
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_skip_non_unittest_class_old_style(self):
@unittest.skip("testing")
class Mixin:
def test_1(self):
record.append(1)
class Foo(Mixin, unittest.TestCase):
pass
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_skip_non_unittest_class_new_style(self):
@unittest.skip("testing")
class Mixin(object):
def test_1(self):
record.append(1)
class Foo(Mixin, unittest.TestCase):
pass
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_expected_failure(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
self.fail("help me!")
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addExpectedFailure', 'stopTest'])
self.assertEqual(result.expectedFailures[0][0], test)
self.assertTrue(result.wasSuccessful())
def test_unexpected_success(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
pass
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addUnexpectedSuccess', 'stopTest'])
self.assertFalse(result.failures)
self.assertEqual(result.unexpectedSuccesses, [test])
self.assertTrue(result.wasSuccessful())
def test_skip_doesnt_run_setup(self):
class Foo(unittest.TestCase):
wasSetUp = False
wasTornDown = False
def setUp(self):
Foo.wasSetUp = True
def tornDown(self):
Foo.wasTornDown = True
@unittest.skip('testing')
def test_1(self):
pass
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertFalse(Foo.wasSetUp)
self.assertFalse(Foo.wasTornDown)
def test_decorated_skip(self):
def decorator(func):
def inner(*a):
return func(*a)
return inner
class Foo(unittest.TestCase):
@decorator
@unittest.skip('testing')
def test_1(self):
pass
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Xarthisius/piernik | python/repo_crawler.py | 2 | 2129 | #!/usr/bin/env python
import os
import pysvn
import tempfile
import piernik_setup as psetup
import subprocess as sp
import shutil
from drive_via_spawn import parse_mpisignals
PIERNIK_REPO = r"svn+ssh://ladon/piernik/piernik/trunk"
REV_START = 7700
REV_END = 7733
PROBLEM = "mcrwind"
SETUP_CMD = "%s --param problem.par.build --compiler gnu47 --debug" % PROBLEM
SETUP_CMD_OLD = "%s --param problem.par.build --compiler gnudbg" % PROBLEM
HDF5_FILE = "crwind_final_tst_0001.h5"
def get_rev(rev):
return pysvn.Revision(pysvn.opt_revision_kind.number, rev)
def compare_with_previous(tdir, fname, pref):
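    # Compare the freshly produced HDF5 dump against the gold file kept from
    # the previous good revision; piernik_problem.py is assumed to exit
    # non-zero on a mismatch and to leave diff.png / diff_bare.png behind,
    # which get archived under per-revision-pair names.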
gold_file = os.path.join(tdir, 'previous.h5')
if os.path.isfile(gold_file):
        if sp.call([os.path.join(tdir, 'piernik_problem.py'),
                    gold_file, fname]) != 0:
            shutil.move('diff.png', os.path.join(tdir, "diff_%s" % pref))
            shutil.move('diff_bare.png',
                        os.path.join(tdir, "bare_%s" % pref))
shutil.move(fname, gold_file)
return
# test_dir = tempfile.mkdtemp()
test_dir = '/dev/shm/dupa'
os.mkdir(test_dir)
client = pysvn.Client()
client.checkout(PIERNIK_REPO, test_dir, revision=get_rev(REV_START))
cwd = os.getcwd()
shutil.copy('piernik_problem.py', test_dir)
os.chdir(test_dir)
prev_rev = REV_START
for rev in range(REV_START, REV_END):
client.update('.', revision=get_rev(rev))
try:
psetup.setup_piernik(SETUP_CMD)
except IOError:
psetup.setup_piernik(SETUP_CMD_OLD)
run_dir = os.path.join(test_dir, "runs", PROBLEM)
piernik_exe = os.path.join(run_dir, "piernik")
hdf_file = os.path.join(run_dir, HDF5_FILE)
if not os.path.isfile(piernik_exe):
print "Failed to compile piernik, revision will be skipped."
else:
sp.call([piernik_exe, "-w", run_dir, "-p", run_dir])
if not os.path.isfile(hdf_file):
print "Failed to execute piernik, revision will be skipped."
else:
pref = "%i_%i.png" % (prev_rev, rev)
compare_with_previous(test_dir, hdf_file, pref)
prev_rev = rev
shutil.rmtree(run_dir)
print test_dir
| gpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/lib/requests/compat.py | 571 | 2556 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
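# Illustrative downstream use (names are the ones exported above): other
# modules import these shims so a single code path runs on both major
# versions, e.g.:
#   from .compat import urlparse, str, bytes
#   scheme = urlparse('http://example.com/path?q=1').scheme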
| bsd-3-clause |
sandeepdsouza93/TensorFlow-15712 | tensorflow/python/kernel_tests/check_ops_test.py | 20 | 28413 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
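# Common pattern used throughout these tests (illustrative): in graph mode
# the assert ops only run when something depends on them, so each test
# threads the assertion through control_dependencies and forces execution
# with tf.identity:
#   with tf.control_dependencies([tf.assert_positive(x)]):
#       out = tf.identity(x)
#   out.eval()  # raises an OpError if the assertion fails at runtime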
class AssertProperIterableTest(tf.test.TestCase):
def test_single_tensor_raises(self):
tensor = tf.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(tensor)
def test_single_sparse_tensor_raises(self):
ten = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(ten)
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(array)
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(mystr)
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
tf.assert_proper_iterable(non_iterable)
def test_list_does_not_raise(self):
list_of_stuff = [tf.constant([11, 22]), tf.constant([1, 2])]
tf.assert_proper_iterable(list_of_stuff)
def test_generator_does_not_raise(self):
generator_of_stuff = (tf.constant([11, 22]), tf.constant([1, 2]))
tf.assert_proper_iterable(generator_of_stuff)
class AssertEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_equal(big, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_equal(small, big)]):
out = tf.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
small_2 = tf.constant([1, 2], name="small_2")
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
small_2 = tf.constant([1, 1], name="small_2")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessTest(tf.test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies(
[tf.assert_less(small, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_less(big, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_doesnt_raise_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_less_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_less_equal(big, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_doesnt_raise_when_less_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 1], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 1], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertGreaterTest(tf.test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies(
[tf.assert_greater(small, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_greater(small, big)]):
out = tf.identity(big)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_greater(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_greater(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_greater(big, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_greater(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_greater(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertGreaterEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_greater_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_greater_equal(small, big, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*big"):
out.eval()
def test_doesnt_raise_when_greater_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 1], name="big")
with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="big")
big = tf.constant([3, 1], name="small")
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_greater_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertNegativeTest(tf.test.TestCase):
def test_doesnt_raise_when_negative(self):
with self.test_session():
frank = tf.constant([-1, -2], name="frank")
with tf.control_dependencies([tf.assert_negative(frank)]):
out = tf.identity(frank)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
doug = tf.constant([1, 2], name="doug")
with tf.control_dependencies([tf.assert_negative(doug, message="fail")]):
out = tf.identity(doug)
with self.assertRaisesOpError("fail.*doug"):
out.eval()
def test_raises_when_zero(self):
with self.test_session():
claire = tf.constant([0], name="claire")
with tf.control_dependencies([tf.assert_negative(claire)]):
out = tf.identity(claire)
with self.assertRaisesOpError("claire"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertPositiveTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
freddie = tf.constant([-1, -2], name="freddie")
with tf.control_dependencies(
[tf.assert_positive(freddie, message="fail")]):
out = tf.identity(freddie)
with self.assertRaisesOpError("fail.*freddie"):
out.eval()
def test_doesnt_raise_when_positive(self):
with self.test_session():
remmy = tf.constant([1, 2], name="remmy")
with tf.control_dependencies([tf.assert_positive(remmy)]):
out = tf.identity(remmy)
out.eval()
def test_raises_when_zero(self):
with self.test_session():
meechum = tf.constant([0], name="meechum")
with tf.control_dependencies([tf.assert_positive(meechum)]):
out = tf.identity(meechum)
with self.assertRaisesOpError("meechum"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertRankTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(
ValueError, "fail.*my_tensor.*must have rank 1"):
with tf.control_dependencies(
[tf.assert_rank(tensor, desired_rank, message="fail")]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies(
[tf.assert_rank(tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_raises_if_rank_is_not_scalar_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
tf.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
def test_raises_if_rank_is_not_integer_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
tf.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank at least 1"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
  def test_rank_one_ten_doesnt_raise_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_ten_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
zoe = tf.constant([-1, -2], name="zoe")
with tf.control_dependencies([tf.assert_non_negative(zoe)]):
out = tf.identity(zoe)
with self.assertRaisesOpError("zoe"):
out.eval()
def test_doesnt_raise_when_zero_and_positive(self):
with self.test_session():
lucas = tf.constant([0, 2], name="lucas")
with tf.control_dependencies([tf.assert_non_negative(lucas)]):
out = tf.identity(lucas)
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-negative when it satisfies:
# For every element x_i in x, x_i >= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_non_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertNonPositiveTest(tf.test.TestCase):
def test_doesnt_raise_when_zero_and_negative(self):
with self.test_session():
tom = tf.constant([0, -2], name="tom")
with tf.control_dependencies([tf.assert_non_positive(tom)]):
out = tf.identity(tom)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
rachel = tf.constant([0, 2], name="rachel")
with tf.control_dependencies([tf.assert_non_positive(rachel)]):
out = tf.identity(rachel)
with self.assertRaisesOpError("rachel"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-positive when it satisfies:
# For every element x_i in x, x_i <= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_non_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertIntegerTest(tf.test.TestCase):
def test_doesnt_raise_when_integer(self):
with self.test_session():
integers = tf.constant([1, 2], name="integers")
with tf.control_dependencies([tf.assert_integer(integers)]):
out = tf.identity(integers)
out.eval()
def test_raises_when_float(self):
with self.test_session():
floats = tf.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
tf.assert_integer(floats)
class IsStrictlyIncreasingTest(tf.test.TestCase):
def test_constant_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([1, 0, -1]).eval())
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([[1, 3], [2, 4]]).eval())
def test_increasing_tensor_is_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([[-1, 2], [3, 4]]).eval())
def test_tensor_with_one_element_is_strictly_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([1]).eval())
def test_empty_tensor_is_strictly_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([]).eval())
class IsNonDecreasingTest(tf.test.TestCase):
def test_constant_tensor_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_non_decreasing(self):
with self.test_session():
self.assertFalse(tf.is_non_decreasing([3, 2, 1]).eval())
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
with self.test_session():
self.assertFalse(tf.is_non_decreasing([[1, 3], [2, 4]]).eval())
def test_increasing_rank_one_tensor_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([[-1, 2], [3, 3]]).eval())
def test_tensor_with_one_element_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([1]).eval())
def test_empty_tensor_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([]).eval())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
paulsmith/geodjango | tests/modeltests/field_defaults/models.py | 13 | 1423 | """
32. Callable defaults
You can pass callable objects as the ``default`` parameter to a field. When
the object is created without an explicit value passed in, Django will call
the method to determine the default value.
This example uses ``datetime.datetime.now`` as the default for the ``pub_date``
field.
"""
from django.db import models
from datetime import datetime
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField(default=datetime.now)
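    # Note: the callable itself (datetime.now) is passed, not its result
    # (datetime.now()); Django invokes it when each instance is created, so
    # every new Article gets a fresh timestamp rather than the timestamp of
    # module import.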
def __unicode__(self):
return self.headline
__test__ = {'API_TESTS':"""
>>> from datetime import datetime
# No articles are in the system yet.
>>> Article.objects.all()
[]
# Create an Article.
>>> a = Article(id=None)
# Grab the current datetime; it should be very close to the default that was
# just saved as a.pub_date
>>> now = datetime.now()
# Save it into the database. You have to call save() explicitly.
>>> a.save()
# Now it has an ID. Note it's a long integer, as designated by the trailing "L".
>>> a.id
1L
# Access database columns via Python attributes.
>>> a.headline
u'Default headline'
# make sure the two dates are sufficiently close
>>> d = now - a.pub_date
>>> d.seconds < 5
True
# make sure that SafeUnicode fields work
>>> from django.utils.safestring import SafeUnicode
>>> a.headline = SafeUnicode(u'SafeUnicode Headline')
>>> a.save()
"""}
| bsd-3-clause |
GIP-RECIA/esup-news | web-root/fckeditor/editor/filemanager/connectors/py/wsgi.py | 89 | 1629 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from a WSGI-capable server (recommended)
def App(environ, start_response):
"WSGI entry point. Run the connector"
if environ['SCRIPT_NAME'].endswith("connector.py"):
conn = FCKeditorConnector(environ)
elif environ['SCRIPT_NAME'].endswith("upload.py"):
conn = FCKeditorQuickUpload(environ)
else:
start_response ("200 Ok", [('Content-Type','text/html')])
yield "Unknown page requested: "
yield environ['SCRIPT_NAME']
return
try:
# run the connector
data = conn.doResponse()
# Start WSGI response:
start_response ("200 Ok", conn.headers)
# Send response text
yield data
except:
start_response("500 Internal Server Error",[("Content-type","text/html")])
file = StringIO()
cgitb.Hook(file = file).handle()
yield file.getvalue()
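# Minimal standalone run for local testing (illustrative; assumes only the
# stdlib wsgiref server):
#   from wsgiref.simple_server import make_server
#   make_server('localhost', 8000, App).serve_forever()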
| gpl-3.0 |
Zhongqilong/kbengine | kbe/src/lib/python/Tools/scripts/eptags.py | 89 | 1485 | #! /usr/bin/env python3
"""Create a TAGS file for Python programs, usable with GNU Emacs.
usage: eptags pyfiles...
The output TAGS file is usable with Emacs version 18, 19, 20.
Tagged are:
- functions (even inside other defs or classes)
- classes
eptags warns about files it cannot open.
eptags will not give warnings about duplicate tags.
BUGS:
Because of tag duplication (methods with the same name in different
classes), TAGS files are not very useful for most object-oriented
python projects.
"""
import sys,re
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]'
matcher = re.compile(expr)
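# Tags emitted below follow the classic Emacs etags layout: per file a
# "\f\n<filename>,<size>" section header, then one line per definition of
# the form "<matched def/class text>\177<lineno>,<charno>".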
def treat_file(filename, outfp):
"""Append tags found in file named 'filename' to the open file 'outfp'"""
try:
fp = open(filename, 'r')
    except OSError:
sys.stderr.write('Cannot open %s\n'%filename)
return
charno = 0
lineno = 0
tags = []
size = 0
while 1:
line = fp.readline()
if not line:
break
lineno = lineno + 1
m = matcher.search(line)
if m:
tag = m.group(0) + '\177%d,%d\n' % (lineno, charno)
tags.append(tag)
size = size + len(tag)
charno = charno + len(line)
outfp.write('\f\n%s,%d\n' % (filename,size))
for tag in tags:
outfp.write(tag)
def main():
outfp = open('TAGS', 'w')
for filename in sys.argv[1:]:
treat_file(filename, outfp)
if __name__=="__main__":
main()
| lgpl-3.0 |
danielvdende/incubator-airflow | airflow/contrib/operators/hive_to_dynamodb.py | 21 | 4084 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
from airflow.hooks.hive_hooks import HiveServer2Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
"""
Moves data from Hive to DynamoDB, note that for now the data is loaded
into memory before being pushed to DynamoDB, so this operator should
be used for smallish amount of data.
:param sql: SQL query to execute against the hive database. (templated)
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
table_name,
table_keys,
pre_process=None,
pre_process_args=None,
pre_process_kwargs=None,
region_name=None,
schema='default',
hiveserver2_conn_id='hiveserver2_default',
aws_conn_id='aws_default',
*args, **kwargs):
super(HiveToDynamoDBTransferOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info('Extracting data from Hive')
self.log.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
table_name=self.table_name,
table_keys=self.table_keys,
region_name=self.region_name)
self.log.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(
json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data,
args=self.pre_process_args,
kwargs=self.pre_process_kwargs))
self.log.info('Done.')
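# Illustrative DAG usage; the task id, SQL, table and key names below are
# hypothetical:
#
#   load_scores = HiveToDynamoDBTransferOperator(
#       task_id='hive_to_dynamodb',
#       sql='SELECT user_id, score FROM hive_db.scores',
#       table_name='scores',
#       table_keys=['user_id'],
#       schema='hive_db',
#       dag=dag)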
| apache-2.0 |
AlexCaranha/Wox | PythonHome/Lib/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
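# Illustrative wiring (this mirrors how chardet's group probers consume
# these model dicts; shown here only as a usage note):
#   from .sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(TIS620ThaiModel)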
| mit |
sbrunner/QGIS | python/plugins/processing/algs/gdal/ClipRasterByMask.py | 5 | 7383 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ClipRasterByMask.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ClipRasterByMask(GdalAlgorithm):
INPUT = 'INPUT'
MASK = 'MASK'
NODATA = 'NODATA'
ALPHA_BAND = 'ALPHA_BAND'
CROP_TO_CUTLINE = 'CROP_TO_CUTLINE'
KEEP_RESOLUTION = 'KEEP_RESOLUTION'
OPTIONS = 'OPTIONS'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterFeatureSource(self.MASK,
self.tr('Mask layer'),
[QgsProcessing.TypeVectorPolygon]))
self.addParameter(QgsProcessingParameterNumber(self.NODATA,
self.tr('Assign a specified nodata value to output bands'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0,
optional=True))
self.addParameter(QgsProcessingParameterBoolean(self.ALPHA_BAND,
self.tr('Create and output alpha band'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.CROP_TO_CUTLINE,
self.tr('Crop the extent of the target dataset to the extent of the cutline'),
defaultValue=True))
self.addParameter(QgsProcessingParameterBoolean(self.KEEP_RESOLUTION,
self.tr('Keep resolution of output raster'),
defaultValue=False))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation parameters'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=5)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Clipped (mask)')))
def name(self):
return 'cliprasterbymasklayer'
def displayName(self):
return self.tr('Clip raster by mask layer')
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-clip.png'))
def group(self):
return self.tr('Raster extraction')
def groupId(self):
return 'rasterextraction'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
maskLayer, maskLayerName = self.getOgrCompatibleSource(self.MASK, parameters, context, feedback, executing)
nodata = self.parameterAsDouble(parameters, self.NODATA, context)
options = self.parameterAsString(parameters, self.OPTIONS, context)
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
arguments = []
arguments.append('-ot')
arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
if self.parameterAsBool(parameters, self.KEEP_RESOLUTION, context):
arguments.append('-tr')
arguments.append(str(inLayer.rasterUnitsPerPixelX()))
arguments.append(str(-inLayer.rasterUnitsPerPixelY()))
arguments.append('-tap')
arguments.append('-cutline')
arguments.append(maskLayer)
if self.parameterAsBool(parameters, self.CROP_TO_CUTLINE, context):
arguments.append('-crop_to_cutline')
if self.parameterAsBool(parameters, self.ALPHA_BAND, context):
arguments.append('-dstalpha')
if nodata:
arguments.append('-dstnodata {}'.format(nodata))
if options:
arguments.append('-co')
arguments.append(options)
arguments.append(inLayer.source())
arguments.append(out)
return ['gdalwarp', GdalUtils.escapeAndJoin(arguments)]
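# Illustration only (hypothetical file names, not part of the original source):
# with the defaults above (data type index 5 = Float32, crop-to-cutline on,
# nodata 0.0 suppressed because it is falsy), the assembled command resembles:
#   gdalwarp -ot Float32 -of GTiff -cutline mask.shp -crop_to_cutline input.tif clipped.tif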
| gpl-2.0 |
samuelclay/NewsBlur | vendor/oauth2client/multistore_file.py | 1 | 11266 | # Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credentials themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from .anyjson import simplejson
from oauth2client.client import Storage as BaseStorage
from oauth2client.client import Credentials
from oauth2client import util
from .locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
class Error(Exception):
"""Base error for this module."""
pass
class NewerCredentialStoreError(Error):
"""The credential store is a newer version that supported."""
pass
@util.positional(4)
def get_credential_storage(filename, client_id, user_agent, scope,
warn_on_readonly=True):
"""Get a Storage instance for a credential.
Args:
filename: The JSON file storing a set of credentials
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: string or list of strings, Scope(s) being requested
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
filename = os.path.realpath(os.path.expanduser(filename))
_multistores_lock.acquire()
try:
multistore = _multistores.setdefault(
filename, _MultiStore(filename, warn_on_readonly=warn_on_readonly))
finally:
_multistores_lock.release()
if type(scope) is list:
scope = ' '.join(scope)
return multistore._get_storage(client_id, user_agent, scope)
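# A minimal usage sketch (hypothetical file name, client id and scope; not part
# of the original source):
#
#   storage = get_credential_storage(
#       'creds.json', 'my-client-id', 'my-user-agent/1.0',
#       ['https://www.googleapis.com/auth/userinfo.email'])
#   credentials = storage.get()  # returns a Credentials object or None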
class _MultiStore(object):
"""A file backed store for multiple credentials."""
@util.positional(2)
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._file = LockedFile(filename, 'r+b', 'rb')
self._thread_lock = threading.Lock()
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# (client_id, user_agent, scope) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
class _Storage(BaseStorage):
"""A Storage object that knows how to read/write a single credential."""
def __init__(self, multistore, client_id, user_agent, scope):
self._multistore = multistore
self._client_id = client_id
self._user_agent = user_agent
self._scope = scope
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
self._multistore._lock()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._multistore._unlock()
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
credential = self._multistore._get_credential(
self._client_id, self._user_agent, self._scope)
if credential:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._update_credential(credentials, self._scope)
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
"""
self._multistore._delete_credential(self._client_id, self._user_agent,
self._scope)
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._file.filename()):
old_umask = os.umask(0o177)
try:
open(self._file.filename(), 'a+b').close()
finally:
os.umask(old_umask)
def _lock(self):
"""Lock the entire multistore."""
self._thread_lock.acquire()
self._file.open_and_lock()
if not self._file.is_locked():
self._read_only = True
if self._warn_on_readonly:
logger.warn('The credentials file (%s) is not writable. Opening in '
'read-only mode. Any refreshed credentials will only be '
'valid for this run.' % self._file.filename())
if os.path.getsize(self._file.filename()) == 0:
logger.debug('Initializing empty multistore file')
# The multistore is empty so write out an empty file.
self._data = {}
self._write()
elif not self._read_only or self._data is None:
# Only refresh the data if we are read/write or we haven't
# cached the data yet. If we are readonly, we assume it isn't
# changing out from under us and that we only have to read it
# once. This prevents us from whacking any new access keys that
# we have cached in memory but were unable to write out.
self._refresh_data_cache()
def _unlock(self):
"""Release the lock on the multistore."""
self._file.unlock_and_close()
self._thread_lock.release()
def _locked_json_read(self):
"""Get the raw content of the multistore file.
The multistore must be locked when this is called.
Returns:
The contents of the multistore decoded as JSON.
"""
assert self._thread_lock.locked()
self._file.file_handle().seek(0)
return simplejson.load(self._file.file_handle())
def _locked_json_write(self, data):
"""Write a JSON serializable data structure to the multistore.
The multistore must be locked when this is called.
Args:
data: The data to be serialized and written.
"""
assert self._thread_lock.locked()
if self._read_only:
return
self._file.file_handle().seek(0)
simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
self._file.file_handle().truncate()
def _refresh_data_cache(self):
"""Refresh the contents of the multistore.
The multistore must be locked when this is called.
Raises:
NewerCredentialStoreError: Raised when a newer client has written the
store.
"""
self._data = {}
try:
raw_data = self._locked_json_read()
except Exception:
logger.warn('Credential data store could not be loaded. '
'Will ignore and overwrite.')
return
version = 0
try:
version = raw_data['file_version']
except Exception:
logger.warn('Missing version for credential data store. It may be '
'corrupt or an old version. Overwriting.')
if version > 1:
raise NewerCredentialStoreError(
'Credential file has file_version of %d. '
'Only file_version of 1 is supported.' % version)
credentials = []
try:
credentials = raw_data['data']
except (TypeError, KeyError):
pass
for cred_entry in credentials:
try:
(key, credential) = self._decode_credential_from_json(cred_entry)
self._data[key] = credential
except Exception:
# If something goes wrong loading a credential, just ignore it
logger.info('Error decoding credential, skipping', exc_info=True)
def _decode_credential_from_json(self, cred_entry):
"""Load a credential from our JSON serialization.
Args:
cred_entry: A dict entry from the data member of our format
Returns:
(key, cred) where the key is the key tuple and the cred is the
OAuth2Credential object.
"""
raw_key = cred_entry['key']
client_id = raw_key['clientId']
user_agent = raw_key['userAgent']
scope = raw_key['scope']
key = (client_id, user_agent, scope)
credential = None
credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
return (key, credential)
def _write(self):
"""Write the cached data back out.
The multistore must be locked.
"""
raw_data = {'file_version': 1}
raw_creds = []
raw_data['data'] = raw_creds
for (cred_key, cred) in list(self._data.items()):
raw_key = {
'clientId': cred_key[0],
'userAgent': cred_key[1],
'scope': cred_key[2]
}
raw_cred = simplejson.loads(cred.to_json())
raw_creds.append({'key': raw_key, 'credential': raw_cred})
self._locked_json_write(raw_data)
def _get_credential(self, client_id, user_agent, scope):
"""Get a credential from the multistore.
The multistore must be locked.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
The credential specified or None if not present
"""
key = (client_id, user_agent, scope)
return self._data.get(key, None)
def _update_credential(self, cred, scope):
"""Update a credential and write the multistore.
This must be called when the multistore is locked.
Args:
cred: The OAuth2Credential to update/set
scope: The scope(s) that this credential covers
"""
key = (cred.client_id, cred.user_agent, scope)
self._data[key] = cred
self._write()
def _delete_credential(self, client_id, user_agent, scope):
"""Delete a credential and write the multistore.
This must be called when the multistore is locked.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: The scope(s) that this credential covers
"""
key = (client_id, user_agent, scope)
try:
del self._data[key]
except KeyError:
pass
self._write()
def _get_storage(self, client_id, user_agent, scope):
"""Get a Storage object to get/set a credential.
This Storage is a 'view' into the multistore.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
A Storage object that can be used to get/set this cred
"""
return self._Storage(self, client_id, user_agent, scope)
| mit |
inasafe/inasafe | safe/metadata/hazard_layer_metadata.py | 11 | 1997 | # coding=utf-8
"""Hazard Layer Metadata."""
from safe.metadata.generic_layer_metadata import GenericLayerMetadata
from safe.metadata.utilities import merge_dictionaries
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
class HazardLayerMetadata(GenericLayerMetadata):
"""
Metadata class for hazard layers
.. versionadded:: 3.2
"""
_standard_properties = {
'hazard': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'inasafe/'
'hazard/'
'gco:CharacterString'),
'hazard_category': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'inasafe/'
'hazard_category/'
'gco:CharacterString'),
'continuous_hazard_unit': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'inasafe/'
'continuous_hazard_unit/'
'gco:CharacterString'),
'value_maps': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'inasafe/'
'value_map/'
'gco:Dictionary'),
'thresholds': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'inasafe/'
'thresholds/'
'gco:Dictionary'),
'active_band': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'inasafe/'
'active_band/'
'gco:Integer'),
}
_standard_properties = merge_dictionaries(
GenericLayerMetadata._standard_properties, _standard_properties)
| gpl-3.0 |
themurph/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.40/roles/lib_openshift/src/ansible/oc_label.py | 84 | 1037 | # pylint: skip-file
# flake8: noqa
def main():
''' ansible oc module for labels '''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list', 'add']),
debug=dict(default=False, type='bool'),
kind=dict(default='node', type='str',
choices=['node', 'pod', 'namespace']),
name=dict(default=None, type='str'),
namespace=dict(default=None, type='str'),
labels=dict(default=None, type='list'),
selector=dict(default=None, type='str'),
),
supports_check_mode=True,
mutually_exclusive=(['name', 'selector']),
)
results = OCLabel.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
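# A hedged playbook sketch (hypothetical host and label values, and an assumed
# shape for the 'labels' list; not part of the original source) showing how
# this module's argument spec is exercised:
#
#   - name: add a region label to a node
#     oc_label:
#       state: add
#       kind: node
#       name: node1.example.com
#       labels:
#         - key: region
#           value: primary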
| apache-2.0 |
richard-shepherd/monopyly | AIs/Cedric Daligny/VSSchizoAI.py | 1 | 29822 | __author__ = 'Cedric'
# Each piece of information is used to sort the properties for the given policy.
import random
from monopyly import *
from .Memory import *
from .Policy import *
class VSSchizoAI(PlayerAIBase):
'''
'''
def __init__(self):
'''
ctor
'''
# memory information
self.needed_money = 0
self.auction_memory = AuctionMemory()
self.deal_memory = DealMemory()
self.property_policy = AcquiringPolicy(self)
self.house_policy = HousePolicy_v2(self,HousePolicy_v2.HousePolicy.ONE_COMPLETE_SET, HousePolicy_v2.RepartitionPolicy.SAME_SIZE)
self.selling_policy = SellingPolicy(self,self.deal_memory)
self.chance_policy = ChancePolicy(random.random())
self.jail_policy = JailPolicy(random.random(), random.random() * 10, random.random()*10 + 12)
self.money_to_be_taken = 0
self.properties_information = {
#brown
Square.Name.OLD_KENT_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.WHITECHAPEL_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#light blue
Square.Name.THE_ANGEL_ISLINGTON: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.EUSTON_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.PENTONVILLE_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#rose
Square.Name.PALL_MALL: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.WHITEHALL: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.NORTHUMBERLAND_AVENUE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#orange
Square.Name.BOW_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.MARLBOROUGH_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.VINE_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#red
Square.Name.STRAND: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.FLEET_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.TRAFALGAR_SQUARE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#yellow
Square.Name.LEICESTER_SQUARE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.COVENTRY_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.PICCADILLY: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#green
Square.Name.REGENT_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.OXFORD_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.BOND_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#dark blue
Square.Name.PARK_LANE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.MAYFAIR: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#station
Square.Name.KINGS_CROSS_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.MARYLEBONE_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.FENCHURCH_STREET_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.LIVERPOOL_STREET_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#company
Square.Name.ELECTRIC_COMPANY: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.WATER_WORKS: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
}
def get_name(self):
return 'VSSchizoAI'
def start_of_game(self):
'''
Called at the start of the game.
No response is required.
'''
self.needed_money = 0
self.money_to_be_taken = 0
def start_of_turn(self, game_state, player):
'''
Called when an AI's turn starts. All AIs receive this notification.
No response is required.
'''
self.needed_money = 0
self.money_to_be_taken = 0
self.chance_policy = ChancePolicy(random.random())
self.jail_policy = JailPolicy(random.random(), random.random() * 10, random.random()*10 + 12)
self.money_to_be_taken = 0
self.properties_information = {
#brown
Square.Name.OLD_KENT_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.WHITECHAPEL_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#light blue
Square.Name.THE_ANGEL_ISLINGTON: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.EUSTON_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.PENTONVILLE_ROAD: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#rose
Square.Name.PALL_MALL: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.WHITEHALL: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.NORTHUMBERLAND_AVENUE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#orange
Square.Name.BOW_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.MARLBOROUGH_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.VINE_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#red
Square.Name.STRAND: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.FLEET_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.TRAFALGAR_SQUARE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#yellow
Square.Name.LEICESTER_SQUARE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.COVENTRY_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.PICCADILLY: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#green
Square.Name.REGENT_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.OXFORD_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.BOND_STREET: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#dark blue
Square.Name.PARK_LANE: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.MAYFAIR: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#station
Square.Name.KINGS_CROSS_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.MARYLEBONE_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.FENCHURCH_STREET_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.LIVERPOOL_STREET_STATION: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
#company
Square.Name.ELECTRIC_COMPANY: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
Square.Name.WATER_WORKS: [300 - (random.random() * 300), random.random() + 0.25, random.random() + 0.25, random.random() + 0.25, 250 - (random.random() * 250), random.random() *10, 0.75],
}
def landed_on_unowned_property(self, game_state, player, property):
'''
Called when the AI lands on an unowned property. Only the active
player receives this notification.
Must return either the BUY or DO_NOT_BUY action from the
PlayerAIBase.Action enum.
The default behaviour is DO_NOT_BUY.
'''
return self.property_policy.acquire_through_landing(game_state,player,property)
def money_given(self, player, amount):
'''
Called when money has been given to the player.
No response is required.
'''
pass
def money_will_be_taken(self, player, amount):
'''
Called shortly before money will be taken from the player.
Before the money is taken, there will be an opportunity to
make deals and/or mortgage properties. (This will be done via
subsequent callbacks.)
No response is required.
'''
self.money_to_be_taken = amount
if amount > player.state.cash:
self.needed_money = amount - player.state.cash
pass
def money_taken(self, player, amount):
'''
Called when money has been taken from the player.
No response is required.
'''
pass
def players_birthday(self):
'''
Called when a player picks up the 'It is your birthday...'
Community Chest card.
You should return "Happy Birthday!" (with this casing and the
exclamation mark). If not, you will have to pay £100 instead of
the standard £10.
'''
return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
'''
Called when the player picks up the "Pay a £10 fine or take a Chance" card.
Return either:
PlayerAIBase.Action.PAY_TEN_POUND_FINE
or
PlayerAIBase.Action.TAKE_A_CHANCE
'''
return self.chance_policy.compute()
def property_offered_for_auction(self, game_state, player, property):
'''
Called when a property is put up for auction.
Properties are auctioned when a player lands on an unowned square but does
not want to buy it. All players take part in the auction, including the
player who landed on the square.
The property will be sold to the highest bidder using the eBay rule,
ie, for £1 more than the second-highest bid.
Return the amount you bid. To put in a bid this must be a positive integer.
Zero means that you are not bidding (it does not mean that you are bidding
zero).
The default behaviour is not to bid.
'''
return self.property_policy.acquire_through_auction(game_state,player,property)
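# Worked example of the eBay rule described above (hypothetical bids; not part
# of the original source): with bids of 120, 90 and 0, the highest bidder wins
# the property and pays 91, i.e. one pound more than the second-highest bid.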
def auction_result(self, status, property, player, amount_paid):
'''
Called with the result of an auction. All players receive
this notification.
status is either AUCTION_SUCCEEDED or AUCTION_FAILED.
If the auction succeeded, the property, the player who won
the auction and the amount they paid are passed to the AI.
If the auction failed, the player will be None and the
amount paid will be 0.
No response is required.
'''
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.auction_memory.add_auction(property,player,amount_paid)
pass
def build_houses(self, game_state, player):
'''
Called near the start of the player's turn to give the option of building houses.
Return a list of tuples indicating which properties you want to build houses
on and how many houses to build on each. For example:
[(park_lane, 3), (mayfair, 4)]
The properties should be Property objects.
Return an empty list if you do not want to build.
Notes:
- You must own a whole set of unmortgaged properties before you can
build houses on it.
- You can build on multiple sets in one turn. Just specify all the streets
and houses you want to build.
- Build five houses on a property to have a "hotel".
- You specify the _additional_ houses you will be building, not the
total after building. For example, if Park Lane already has 3 houses
and you specify (park_lane, 2) you will end up with 5
houses (ie, a hotel).
- Sets must end up with 'balanced' housing. No square in a set can
have more than one more house than any other. If you request an
unbalanced build, the whole transaction will be rolled back, even
if it includes balanced building on other sets as well.
- If you do not have (or cannot raise) enough money to build all the
houses specified, the whole transaction will be rolled back. Between
this function call and money being taken, you will have an opportunity
to mortgage properties or make deals.
The default behaviour is not to build.
'''
return self.house_policy.compute(game_state, player)
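# Balanced-housing illustration for the notes above (hypothetical counts): on a
# three-street set, ending up with (2, 2, 1) houses is legal, while (3, 1, 1)
# differs by more than one house and the whole build would be rolled back.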
def sell_houses(self, game_state, player):
'''
Gives the player the option to sell properties.
This is called when any debt, fine or rent has to be paid. It is
called just before mortgage_properties (below).
Notes:
- You cannot mortgage properties with houses on them, so if you
plan to mortgage, make sure you sell all the houses first.
- For each house sold you receive half the price that they were
bought for.
- Houses on a set must end up 'balanced', ie no property can have
more than one more house than any other property in the set.
Return a list of tuples of the streets and number of houses you
want to sell. For example:
[(old_kent_road, 1), (bow_street, 1)]
The streets should be Property objects.
The default is not to sell any houses.
'''
if self.needed_money > 0:
return self.selling_policy.computeHouse(game_state,player)
return []
def mortgage_properties(self, game_state, player):
'''
Gives the player an option to mortgage properties.
This is called before any debt is paid (house building, rent,
tax, fines from cards etc).
Notes:
- You receive half the face value of each property mortgaged.
- You cannot mortgage properties with houses on them.
(The AI will have been given the option to sell houses before this
function is called.)
Return a list of properties to mortgage, for example:
[bow_street, liverpool_street_station]
The properties should be Property objects.
Return an empty list if you do not want to mortgage anything.
The default behaviour is not to mortgage anything.
'''
if self.needed_money > 0:
return self.selling_policy.computeMortgage(game_state,player)
return []
def unmortgage_properties(self, game_state, player):
'''
Called near the start of the player's turn to give them the
opportunity to unmortgage properties.
Unmortgaging costs half the face value plus 10%. Between deciding
to unmortgage and money being taken the player will be given the
opportunity to make deals or sell other properties. If after this
they do not have enough money, the whole transaction will be aborted,
and no properties will be unmortgaged and no money taken.
Return a list of property names to unmortgage, like:
[old_kent_road, bow_street]
The properties should be Property objects.
The default is to return an empty list, ie to do nothing.
'''
return self.property_policy.acquire_through_unmortgage(game_state,player)
def get_out_of_jail(self, game_state, player):
'''
Called in the player's turn, before the dice are rolled, if the player
is in jail.
There are three possible return values:
PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
PlayerAIBase.Action.STAY_IN_JAIL
Buying your way out of jail will cost £50.
The default action is STAY_IN_JAIL.
'''
return self.jail_policy.compute(self,game_state,player)
def propose_deal(self, game_state, player):
'''
Called to allow the player to propose a deal.
You return a DealProposal object.
If you do not want to make a deal, return None.
If you want to make a deal, you provide this information:
- The player number of the player you are proposing the deal to
- A list of properties offered
- A list of properties wanted
- Maximum cash offered as part of the deal
- Minimum cash wanted as part of the deal.
Properties offered and properties wanted are passed as lists of
Property objects.
If you offer money as part of the deal, set the cash wanted to zero
and vice versa.
Note that the cash limits will not be shown to the proposed-to player.
When the deal is offered to them, they set their own limits for accepting
the deal without seeing your limits. If the limits are acceptable to both
players, the deal will be done at the halfway point.
For example, Player1 proposes:
Propose to: Player2
Properties offered: Mayfair
Properties wanted: (none)
Maximum cash offered: 0
Minimum cash wanted: 500
Player2 accepts with these limits:
Maximum cash offered: 1000
Minimum cash wanted: 0
The deal will be done with Player2 receiving Mayfair and paying £750
to Player1.
The only 'negotiation' is in the managing of cash along with the deal
as discussed above. There is no negotiation about which properties are
part of the deal. If a deal is rejected because it does not contain the
right properties, another deal can be made at another time with different
lists of properties.
Example construction and return of a DealProposal object:
return DealProposal(
propose_to_player_number=2,
properties_offered=[vine_street, bow_street],
properties_wanted=[park_lane],
maximum_cash_offered=200)
The default is for no deal to be proposed.
'''
if self.needed_money > 0:
return self.selling_policy.propose_deal(game_state,player)
return self.property_policy.acquire_through_deal_proposal(game_state,player,player.state.cash - self.money_to_be_taken)
def deal_proposed(self, game_state, player, deal_proposal):
'''
Called when another player proposes a deal to you.
See propose_deal (above) for details of the DealProposal object.
Return a DealResponse object.
To reject a deal:
return DealResponse(DealResponse.Action.REJECT)
To accept a deal:
return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=300)
or
return DealResponse(DealResponse.Action.ACCEPT, minimum_cash_wanted=800)
The default is to reject the deal.
'''
return self.property_policy.acquire_through_deal_being_proposed(game_state,player,deal_proposal)
def deal_result(self, deal_info):
'''
Called when a proposed deal has finished. The players involved in
the deal receive this notification.
deal_info is a PlayerAIBase.DealInfo 'enum' giving indicating
whether the deal succeeded, and if not why not.
No response is required.
'''
pass
def deal_completed(self, deal_result):
'''
Called when a deal has successfully completed to let all
players know the details of the deal which took place.
deal_result is a DealResult object.
Note that the cash_transferred_from_proposer_to_proposee in
the deal_result can be negative if cash was transferred from
the proposee to the proposer.
No response is required.
'''
self.deal_memory.add_deal(deal_result)
pass
def player_went_bankrupt(self, player):
'''
Called when a player goes bankrupt.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
if player.name == self.get_name() and player.net_worth + player.state.cash > 0:
property_net_worth = 0
property_with_house = 0
property_unmortgaged = 0
houses = 0
for property in player.state.properties:
# We add the mortgage value of properties...
if not property.is_mortgaged:
property_unmortgaged += 1
property_net_worth += property.mortgage_value
# We add the resale value of houses...
if type(property) == Street:
if property.number_of_houses > 0:
property_with_house += 1
houses += property.number_of_houses
property_net_worth += int(property.house_price/2 * property.number_of_houses)
if property_unmortgaged > 0 or property_with_house > 0 or houses > 0:
Logger.log(player.name + " went bankrupt with a cash of " + format(player.state.cash) + " and a net of " + format(player.net_worth) + "/" + format(property_net_worth), Logger.ERROR)
Logger.log(player.name + " went bankrupt with " + format(property_unmortgaged) + " properties unmortgaged", Logger.ERROR)
Logger.log(player.name + " went bankrupt with " + format(property_with_house) + " properties with house", Logger.ERROR)
Logger.log(player.name + " went bankrupt with " + format(houses) + " houses", Logger.ERROR)
for property in player.state.properties:
# We add the mortgage value of properties...
if not property.is_mortgaged:
Logger.log(player.name + " unmortgage property: " + property.name, Logger.ERROR)
# We add the resale value of houses...
if type(property) == Street:
if property.number_of_houses > 0:
Logger.log(player.name + " housed property: " + property.name + " / " + format(property.number_of_houses), Logger.ERROR)
#exit(-1)
pass
def player_ran_out_of_time(self, player):
'''
Called when a player is removed from the game because
they ran out of processing time.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def game_over(self, winner, maximum_rounds_played):
'''
Called when the game is over.
All players receive this notification.
winner is the winning player (a Player object) or None if the
game was drawn.
maximum_rounds_played is True if the game went to the round-limit.
No response is required.
'''
pass
def ai_error(self, message):
'''
Called if the return value from any of the Player AI functions
was invalid. for example, if it was not of the expected type.
No response is required.
'''
pass
| mit |
luis-rr/saga-python | src/saga/adaptors/cpi/replica/logical_directory.py | 10 | 2078 |
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import saga.adaptors.cpi.decorators as cpi_dec
import saga.adaptors.cpi.namespace as cpi_ns
import saga.adaptors.cpi.attributes as cpi_att
SYNC = cpi_dec.CPI_SYNC_CALL
ASYNC = cpi_dec.CPI_ASYNC_CALL
# keep order of inheritance! super() below uses MRO
class LogicalDirectory (cpi_ns.directory.Directory,
cpi_att.Attributes) :
# ----------------------------------------------------------------
#
# initialization methods
#
def __init__ (self, api, adaptor) :
self._cpi_nsdirec = super (LogicalDirectory, self)
self._cpi_nsdirec.__init__ (api, adaptor)
@SYNC
def init_instance (self, url, flags, session) : pass
@ASYNC
def init_instance_async (self, url, flags, session) : pass
@SYNC
def open (self, tgt, flags, ttype) : pass
@ASYNC
def open_async (self, tgt, flags, ttype) : pass
@SYNC
def open_dir (self, tgt, flags, ttype) : pass
@ASYNC
def open_dir_async (self, tgt, flags, ttype) : pass
# ----------------------------------------------------------------
#
# replica methods
#
@SYNC
def get_size (self, tgt, ttype) : pass
@ASYNC
def get_size_async (self, tgt, ttype) : pass
@SYNC
def is_file (self, tgt, ttype) : pass
@ASYNC
def is_file_async (self, tgt, ttype) : pass
@SYNC
def is_file_self (self, ttype) : pass
@ASYNC
def is_file_self_async (self, ttype) : pass
@SYNC
def find_replicas (self, name_pattern, attr_pattern, flags, ttype) : pass
@ASYNC
def find_replicas_async (self, name_pattern, attr_pattern, flags, ttype) : pass
| mit |
Crevil/grpc | src/python/grpcio_tests/tests/testing/_application_testing_common.py | 39 | 1533 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc_testing
from tests.testing.proto import requests_pb2
from tests.testing.proto import services_pb2
# TODO(https://github.com/grpc/grpc/issues/11657): Eliminate this entirely.
# TODO(https://github.com/google/protobuf/issues/3452): Eliminate this if/else.
if services_pb2.DESCRIPTOR.services_by_name.get('FirstService') is None:
FIRST_SERVICE = 'Fix protobuf issue 3452!'
FIRST_SERVICE_UNUN = 'Fix protobuf issue 3452!'
FIRST_SERVICE_UNSTRE = 'Fix protobuf issue 3452!'
FIRST_SERVICE_STREUN = 'Fix protobuf issue 3452!'
FIRST_SERVICE_STRESTRE = 'Fix protobuf issue 3452!'
else:
FIRST_SERVICE = services_pb2.DESCRIPTOR.services_by_name['FirstService']
FIRST_SERVICE_UNUN = FIRST_SERVICE.methods_by_name['UnUn']
FIRST_SERVICE_UNSTRE = FIRST_SERVICE.methods_by_name['UnStre']
FIRST_SERVICE_STREUN = FIRST_SERVICE.methods_by_name['StreUn']
FIRST_SERVICE_STRESTRE = FIRST_SERVICE.methods_by_name['StreStre']
| apache-2.0 |
grengojbo/st2 | st2actions/tests/unit/test_runner.py | 5 | 1888 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
from st2actions.runners import ActionRunner
from st2common.constants.action import (LIVEACTION_STATUS_SUCCEEDED)
RAISE_PROPERTY = 'raise'
def get_runner():
return TestRunner()
class TestRunner(ActionRunner):
def __init__(self):
super(TestRunner, self).__init__(runner_id='1')
self.pre_run_called = False
self.run_called = False
self.post_run_called = False
def pre_run(self):
self.pre_run_called = True
def run(self, action_params):
self.run_called = True
result = {}
if self.runner_parameters.get(RAISE_PROPERTY, False):
raise Exception('Raise required.')
else:
result = {
'ran': True,
'action_params': action_params
}
context = {
'third_party_system': {
'ref_id': '1234'
}
}
return (LIVEACTION_STATUS_SUCCEEDED, json.dumps(result), context)
def post_run(self, status, result):
self.post_run_called = True
| apache-2.0 |
zzxuanyuan/root-compressor-dummy | tutorials/tmva/keras/GenerateModel.py | 20 | 2029 | #!/usr/bin/env python
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.regularizers import l2
from keras import initializations
from keras.optimizers import SGD
# Setup the model here
num_input_nodes = 4
num_output_nodes = 2
num_hidden_layers = 1
nodes_hidden_layer = 64
l2_val = 1e-5
# NOTE: Either you can use predefined initializations (see Keras documentation)
# or you can define your own initialization in such a function
def normal(shape, name=None):
return initializations.normal(shape, scale=0.05, name=name)
model = Sequential()
# Hidden layer 1
# NOTE: Number of input nodes need to be defined in this layer
model.add(Dense(nodes_hidden_layer, init=normal, activation='relu', W_regularizer=l2(l2_val), input_dim=num_input_nodes))
# Hidden layer 2 to num_hidden_layers
# NOTE: Here, you can do what you want
for k in range(num_hidden_layers-1):
model.add(Dense(nodes_hidden_layer, init=normal, activation='relu', W_regularizer=l2(l2_val)))
# Ouput layer
# NOTE: Use following output types for the different tasks
# Binary classification: 2 output nodes with 'softmax' activation
# Regression: 1 output with any activation ('linear' recommended)
# Multiclass classification: (number of classes) output nodes with 'softmax' activation
model.add(Dense(num_output_nodes, init=normal, activation='softmax'))
# Compile model
# NOTE: Use following settings for the different tasks
# Any classification: 'categorical_crossentropy' is recommended loss function
# Regression: 'mean_squared_error' is recommended loss function
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy',])
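# For comparison, a regression variant of the compile step (assumed settings,
# not part of the original tutorial) would swap in the recommended loss:
#   model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.01))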
# Save model
model.save('model.h5')
# Additional information about the model
# NOTE: This is not needed to run the model
# Print summary
model.summary()
# Visualize model as graph
try:
from keras.utils.visualize_util import plot
plot(model, to_file='model.png', show_shapes=True)
except:
print('[INFO] Failed to make model plot')
| lgpl-2.1 |
nhomar/odoo | openerp/addons/base/res/res_country.py | 283 | 4728 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
def location_name_search(self, cr, user, name='', args=None, operator='ilike',
context=None, limit=100):
if not args:
args = []
ids = []
if len(name) == 2:
ids = self.search(cr, user, [('code', 'ilike', name)] + args,
limit=limit, context=context)
search_domain = [('name', operator, name)]
if ids: search_domain.append(('id', 'not in', ids))
ids.extend(self.search(cr, user, search_domain + args,
limit=limit, context=context))
locations = self.name_get(cr, user, ids, context)
return sorted(locations, key=lambda (id, name): ids.index(id))
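# Illustration of the quick-search behaviour above (hypothetical data, not part
# of the original source): searching "be" first collects the record whose code
# matches "BE", then records whose name contains "be"; the final sort by
# ids.index keeps the code match ahead of the name matches.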
class Country(osv.osv):
_name = 'res.country'
_description = 'Country'
_columns = {
'name': fields.char('Country Name',
help='The full name of the country.', required=True, translate=True),
'code': fields.char('Country Code', size=2,
help='The ISO country code in two chars.\n'
'You can use this field for quick search.'),
'address_format': fields.text('Address Format', help="""You can state here the usual format to use for the \
addresses belonging to this country.\n\nYou can use the python-style string pattern with all the fields of the address \
(for example, use '%(street)s' to display the field 'street') plus
\n%(state_name)s: the name of the state
\n%(state_code)s: the code of the state
\n%(country_name)s: the name of the country
\n%(country_code)s: the code of the country"""),
'currency_id': fields.many2one('res.currency', 'Currency'),
'image': fields.binary("Image"),
'country_group_ids': fields.many2many('res.country.group', 'res_country_res_country_group_rel', 'res_country_id', 'res_country_group_id', string='Country Groups'),
}
_sql_constraints = [
('name_uniq', 'unique (name)',
'The name of the country must be unique !'),
('code_uniq', 'unique (code)',
'The code of the country must be unique !')
]
_defaults = {
'address_format': "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s",
}
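# Rendered with hypothetical values, the default address_format above produces:
#   215 Vine St
#   Suite 100
#   Scranton PA 18503
#   United States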
_order='name'
name_search = location_name_search
def create(self, cursor, user, vals, context=None):
if vals.get('code'):
vals['code'] = vals['code'].upper()
return super(Country, self).create(cursor, user, vals,
context=context)
def write(self, cursor, user, ids, vals, context=None):
if vals.get('code'):
vals['code'] = vals['code'].upper()
return super(Country, self).write(cursor, user, ids, vals,
context=context)
class CountryGroup(osv.osv):
_description="Country Group"
_name = 'res.country.group'
_columns = {
'name': fields.char('Name', required=True),
'country_ids': fields.many2many('res.country', 'res_country_res_country_group_rel', 'res_country_group_id', 'res_country_id', string='Countries'),
}
class CountryState(osv.osv):
_description="Country state"
_name = 'res.country.state'
_columns = {
'country_id': fields.many2one('res.country', 'Country',
required=True),
'name': fields.char('State Name', required=True,
help='Administrative divisions of a country. E.g. Fed. State, Departement, Canton'),
'code': fields.char('State Code', size=3,
help='The state code in max. three chars.', required=True),
}
_order = 'code'
name_search = location_name_search
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |