"""
Helper classes for handling file transfers
"""
import logging
import os
import shutil
import subprocess
from taca.utils.filesystem import create_folder
from taca.utils.misc import hashfile, call_external_command
logger = logging.getLogger(__name__)
class TransferAgent(object):
"""
(Abstract) superclass representing an Agent that performs file transfers.
Agents implementing specific methods for transferring files should extend
this and implement the transfer() method.
"""
def __init__(
self,
src_path=None,
dest_path=None,
            opts=None,
**kwargs):
""" Creates an agent instance
:param string src_path: the file or folder that should be transferred
:param string dest_path: the destination file or folder
:param bool validate: whether to validate the transferred files
:param opts: options that will be passed to the transfer command
"""
self.src_path = src_path
self.dest_path = dest_path
        self.validate = kwargs.get('validate', False)
        self.cmdopts = opts or {}
def __str__(self):
return type(self).__name__
def format_options(self):
""" Format the options dictionary stored in this instance's cmdopts
attribute and return the formatted options as a list of strings.
A key in the dictionary represents the option name. If
the corresponding value is None, the option will be assumed to
represent a flag. If the value is a list, the option will be given
multiple times.
For example:
opts = {'opt1': None, 'opt2': 'val1', 'opt3': ['val2','val3']}
will be expanded to:
['--opt1','--opt2=val1','--opt3=val2','--opt3=val3']
:returns: List of formatted options as strings
"""
cmdopts = []
for param, val in self.cmdopts.items():
if val is None:
cmdopts.append(param)
else:
                if isinstance(val, str):
                    val = [val]
                for v in val:
                    cmdopts.append("{}={}".format(param, v))
return cmdopts
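    # A hedged sketch of the expansion performed above (values are
    # illustrative, not part of this module): with
    #   cmdopts = {'-a': None, '--chmod': 'ug+rw', '--exclude': ['*.tmp', '*.log']}
    # format_options() returns
    #   ['-a', '--chmod=ug+rw', '--exclude=*.tmp', '--exclude=*.log']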
def transfer(self):
""" Abstract method, should be implemented by subclasses """
        raise NotImplementedError(
            "This method should be implemented by a subclass")
def validate_src_path(self):
""" Validates that the src_path attribute of the Agent instance.
:raises transfer.TransferError: if src_path is not valid
"""
if self.src_path is None:
raise TransferError(
msg="src_path cannot be None",
src_path=self.src_path,
dest_path=self.dest_path)
if not os.path.exists(self.src_path):
raise TransferError(
msg="src_path '{}' does not exist".format(self.src_path),
src_path=self.src_path,
dest_path=self.dest_path)
def validate_dest_path(self):
""" Validates that the dest_path attribute of the Agent instance.
:raises transfer.TransferError: if dest_path is not valid
"""
if self.dest_path is None:
raise TransferError(
msg="dest_path cannot be None",
src_path=self.src_path,
dest_path=self.dest_path)
def validate_transfer(self):
""" Abstract method, should be implemented by subclasses """
        raise NotImplementedError(
            "This method should be implemented by a subclass")
class RsyncAgent(TransferAgent):
""" An agent that knows how to perform an rsync transfer locally or
between hosts. If supplied with a checksum file, the transfer can
be validated on the receiving side.
"""
CMD = "rsync"
DEFAULT_OPTS = {
"-a": None,
}
def __init__(
self,
src_path,
dest_path=None,
remote_host=None,
remote_user=None,
validate=True,
digestfile=None,
opts=None,
**kwargs):
""" Creates an RsyncAgent instance
:param string src_path: the file or folder that should be transferred
:param string dest_path: the destination file or folder
:param string remote_host: the remote host to transfer to.
If None, the transfer will be on the local filesystem
:param string remote_user: the remote user to connect with.
If None, the local user will be used
:param bool validate: whether to validate the transferred files
using a supplied file with checksums
:param string digestfile: a file with checksums for the files to be
transferred. Must be specified if validate is True. The checksum
algorithm will be inferred from the extension of the digest file
:param opts: options that will be passed to the rsync command
"""
        super(RsyncAgent, self).__init__(
            src_path=src_path,
            dest_path=dest_path,
            validate=validate,
            opts=opts or self.DEFAULT_OPTS,
            **kwargs)
self.remote_host = remote_host
self.remote_user = remote_user
self.digestfile = digestfile
def transfer(self, transfer_log=None):
"""
Execute the transfer as set up by this instance and, if requested,
validate the transfer.
:param string transfer_log: path prefix to log files where stderr
and stdout streams will be directed if this option is specified
        :returns: True on success, False if the validation failed
:raises transfer.TransferError: if src_path or dest_path were not valid
:raises transfer.RsyncError: if the rsync command did not exit successfully
"""
self.validate_src_path()
self.validate_dest_path()
command = [self.CMD] + self.format_options() + [self.src_path,self.remote_path()]
try:
call_external_command(
command,
with_log_files=(transfer_log is not None),
prefix=transfer_log)
except subprocess.CalledProcessError as e:
raise RsyncError(e)
return (not self.validate) or self.validate_transfer()
def remote_path(self):
"""
Construct the remote path according to what has been specified
        :returns: the remote path string, of the form
[remote_user]@[remote_host]:[dest_path]
"""
return "{}{}{}".format(
"{}@".format(self.remote_user) \
if self.remote_user is not None \
else "",
"{}:".format(self.remote_host) \
if self.remote_host is not None \
else "",
self.dest_path \
if self.dest_path is not None \
else ""
)
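    # For example (illustrative values only): with remote_user='svc_transfer',
    # remote_host='archive.example.org' and dest_path='/data/runs',
    # remote_path() returns 'svc_transfer@archive.example.org:/data/runs';
    # with both remote attributes left as None it returns just '/data/runs'.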
def validate_dest_path(self):
""" Validates the dest_path, remote_user and remote_host attributes
of this Agent instance.
:raises transfer.TransferError:
if the combination of attributes is not valid
"""
if self.dest_path is None and self.remote_host is None:
raise TransferError(
msg="dest_path and remote_host cannot both be None",
src_path=self.src_path)
        if self.remote_user is not None and self.remote_host is None:
            raise TransferError(
                msg="remote_host cannot be None if remote_user is not None",
                src_path=self.src_path)
def validate_transfer(self):
""" Validate the transferred files by computing checksums and comparing
to the pre-computed checksums, supplied in the digestfile attribute
of this Agent instance. The hash algorithm is inferred from the file
extension of the digestfile. The paths of the files to check are
assumed to be relative to the location of the digestfile.
Currently not implemented for remote transfers.
:returns: False if any checksum does not match, or if a file does
not exist. True otherwise.
:raises transfer.RsyncValidationError: if the digestfile was not
supplied
"""
if self.remote_host is not None:
raise NotImplementedError("Validation on remote host not implemented")
try:
with open(self.digestfile) as fh:
hasher = self.digestfile.split('.')[-1]
dpath = os.path.dirname(self.digestfile)
for line in fh:
digest,fpath = line.split()
tfile = os.path.join(dpath,fpath)
if not os.path.exists(tfile) or digest != hashfile(
tfile,
hasher=hasher):
return False
        except TypeError:
            raise RsyncValidationError(
                "no digest file specified",
                self.src_path,
                self.dest_path)
return True
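# A hedged usage sketch for RsyncAgent; the paths, host, user and options
# below are hypothetical, not part of this module. The digestfile is assumed
# to hold one "<checksum>  <relative/path>" pair per line, md5sum/sha1sum
# style, with the hash algorithm inferred from its extension (e.g. '.md5'):
#
#   agent = RsyncAgent(
#       '/data/runs/run1',
#       dest_path='/archive/runs',
#       remote_host='archive.example.org',
#       remote_user='svc_transfer',
#       validate=False,  # validation of remote transfers is not implemented
#       opts={'-a': None, '--chmod': 'ug+rw'})
#   agent.transfer(transfer_log='/var/log/taca/run1')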
class SymlinkAgent(TransferAgent):
def __init__(self, src_path, dest_path, overwrite=True, relative=True, **kwargs):
""" Creates an SymlinkAgent instance for creating symlinks
:param string src_path: the file or folder that should be symlinked
:param string dest_path: the destination symlink
:param bool overwrite: if true, the destination file or folder will
be overwritten if it already exists
:param bool relative: if true, the destination symlink will be relative
"""
super(SymlinkAgent,self).__init__(
src_path=src_path,
dest_path=dest_path,
**kwargs)
self.overwrite = overwrite
self.relative = relative
def transfer(self):
""" Create the symlink as specified by this SymlinkAgent instance.
:returns: True if the symlink was created successfully, False otherwise
:raises transfer.TransferError:
if src_path or dest_path were not valid
:raises transfer.SymlinkError:
if an error occurred when creating the symlink
"""
self.validate_src_path()
self.validate_dest_path()
if os.path.exists(self.dest_path):
# If the existing target is a symlink that points to the
# source, we're all good
if self.validate_transfer():
logger.debug("target exists and points to the correct "
"source path: '{}'".format(self.src_path))
return True
# If we are not overwriting, return False
if not self.overwrite:
logger.debug("target '{}' exists and will not be "
"overwritten".format(self.dest_path))
return False
# If the target is a mount, let's not mess with it
if os.path.ismount(self.dest_path):
raise SymlinkError("target exists and is a mount")
# If the target is a link or a file, we remove it
if os.path.islink(self.dest_path) or \
os.path.isfile(self.dest_path):
logger.debug("removing existing target file '{}'"
.format(self.dest_path))
try:
os.unlink(self.dest_path)
except OSError as e:
raise SymlinkError(e)
# If the target is a directory, we remove it and
# everything underneath
elif os.path.isdir(self.dest_path):
logger.debug("removing existing target folder '{}'"
.format(self.dest_path))
try:
shutil.rmtree(self.dest_path)
except OSError as e:
raise SymlinkError(e)
# If it's something else, let's bail out
else:
raise SymlinkError("target exists and will not be overwritten")
if not create_folder(os.path.dirname(self.dest_path)):
raise SymlinkError("failed to create target folder hierarchy")
        try:
            # If we should create a relative symlink, determine the relative path
            os.symlink(
                os.path.relpath(self.src_path, os.path.dirname(self.dest_path))
                if self.relative else self.src_path,
                self.dest_path)
except OSError as e:
raise SymlinkError(e)
return (not self.validate) or self.validate_transfer()
def validate_transfer(self):
""" Validates the symlinked files by verifying that the dest_path was
created, is a link and resolves to the same file as src_path
:returns: True if link is valid, False otherwise
"""
return os.path.exists(self.dest_path) and \
os.path.islink(self.dest_path) and \
os.path.samefile(self.src_path, self.dest_path)
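# A minimal SymlinkAgent sketch (hypothetical paths): creates a relative
# symlink at dest_path pointing back at src_path, replacing any existing
# file or link since overwrite defaults to True:
#
#   agent = SymlinkAgent('/data/runs/run1', '/views/incoming/run1')
#   agent.transfer()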
class TransferError(Exception):
def __init__(self, msg, src_path=None, dest_path=None):
super(TransferError, self).__init__(msg)
self.src_path = src_path
self.dest_path = dest_path
class SymlinkError(TransferError): pass
class RsyncError(TransferError): pass
class RsyncValidationError(TransferError): pass
'''
------------------------------------------
Red9 Studio Pack: Maya Pipeline Solutions
Author: Mark Jackson
email: rednineinfo@gmail.com
Red9 blog : http://red9-consultancy.blogspot.co.uk/
MarkJ blog: http://markj3d.blogspot.co.uk
------------------------------------------
This is the main unittest for the Red9_Meta module and a good
example of what's expected and what the systems can do on simple data
================================================================
'''
import maya.standalone
maya.standalone.initialize(name='python')
import maya.cmds as cmds
import pymel.core as pm
import os
import time
import Red9.core.Red9_Meta as r9Meta
import Red9.startup.setup as r9Setup
r9Setup.start(Menu=False)
#force the upAxis, just in case
r9Setup.mayaUpAxis('y')
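# NOTE: the test classes below follow nose-style conventions (class-level
# setup()/teardown() and plain assert statements), so they are assumed to be
# collected by a nose-compatible runner inside a Maya standalone session,
# e.g. (hypothetical invocation, not defined by this file):
#   mayapy -m nose path/to/this_test_file.py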
class Test_MetaRegistryCalls():
def teardown(self):
cmds.file(new=True,f=True)
r9Meta.registerMClassNodeMapping() # reset the nodeTypes registry
def test_registerMClassNodeMapping(self):
'''
test the registry functions for nodeTypes
'''
cmds.file(new=True,f=True)
r9Meta.MetaClass(name='standardNetworkMetaNode')
assert [cmds.nodeType(n.mNode) for n in r9Meta.getMetaNodes()]==['network']
#register transforms to the NodeTypes
r9Meta.registerMClassNodeMapping(nodeTypes='transform')
print r9Meta.getMClassNodeTypes()
assert r9Meta.getMClassNodeTypes()==sorted(['network', 'objectSet', 'transform'])
new=r9Meta.MetaClass(name='newTransformMetaNode', nodeType='transform')
assert [cmds.nodeType(n.mNode) for n in r9Meta.getMetaNodes()]==['network','transform']
#reset the NodeTypes
r9Meta.resetMClassNodeTypes()
print r9Meta.getMClassNodeTypes()
assert r9Meta.getMClassNodeTypes()==['network','objectSet'] # ,'HIKCharacterNode']
assert [cmds.nodeType(n.mNode) for n in r9Meta.getMetaNodes()]==['network']
def test_getMClassInstances(self):
for mNode in r9Meta.getMClassInstances(r9Meta.MetaHUDNode):
assert issubclass(mNode, r9Meta.MetaHUDNode)
for mNode in r9Meta.getMClassInstances(r9Meta.MetaRig):
assert issubclass(mNode, r9Meta.MetaRig)
def test_getMClassDataFromNode(self):
a=r9Meta.MetaRig(name='rig')
b=r9Meta.MetaRigSubSystem(name='subSub')
assert r9Meta.getMClassDataFromNode(a)=='MetaRig'
assert r9Meta.getMClassDataFromNode(a.mNode)=='MetaRig'
assert r9Meta.getMClassDataFromNode(b)=='MetaRigSubSystem'
assert r9Meta.getMClassDataFromNode(b.mNode)=='MetaRigSubSystem'
class Test_MetaCache():
def teardown(self):
cmds.file(new=True,f=True)
def test_MetaCache(self):
a=r9Meta.MetaRig(name='rig')
dagpath=str(a.mNode)
assert r9Meta.getMetaFromCache(a.mNode)==a
assert r9Meta.getMetaFromCache(dagpath)==a
assert r9Meta.MetaClass(a.mNode)==a
r9Meta.resetCache()
assert not r9Meta.getMetaFromCache(a.mNode)
r9Meta.MetaClass(a.mNode)
assert r9Meta.getMetaFromCache(a.mNode)==a
dagpath=str(a.mNode)
a.delete()
assert not r9Meta.RED9_META_NODECACHE
def test_uuid(self):
a=r9Meta.MetaRig(name='rig')
UUID = a.UUID
assert UUID in r9Meta.RED9_META_NODECACHE
#test the duplicate handler
dup=cmds.duplicate(a.mNode)
assert not dup == a.mNode
nodes=r9Meta.getMetaNodes()
assert len(nodes)==2
assert len(r9Meta.RED9_META_NODECACHE.keys()) == 2
assert r9Meta.RED9_META_NODECACHE[UUID]==a
assert r9Meta.MetaClass(a.mNode).UUID == UUID
def test_wrappedMayaNodes(self):
'''
test how the cache handles non mClass nodes
'''
cmds.polyCube(name='cube1')
n1 = r9Meta.MetaClass('|cube1')
r9Meta.registerMClassNodeCache(n1)
assert r9Meta.RED9_META_NODECACHE['|cube1']==n1
n1.rename('renamedCube1')
assert n1.mNode=='|renamedCube1'
        #because in this case we have no UUIDs we only store the
        #cache against the node name. There's now a test against
        #the MObject to ensure that things are still correct in the pull
cmds.polyCube(name='cube1')
n2 = r9Meta.MetaClass('|cube1')
assert n2.mNode=='|cube1'
assert not n2.mNode=='renamedCube1'
r9Meta.registerMClassNodeCache(n2)
assert r9Meta.RED9_META_NODECACHE['|cube1']==n2
class Test_MetaClass():
def setup(self):
cmds.file(new=True,f=True)
self.MClass=r9Meta.MetaClass(name='MetaClass_Test')
def teardown(self):
self.setup()
def test_initNew(self):
assert isinstance(self.MClass,r9Meta.MetaClass)
assert self.MClass.mClass=='MetaClass'
assert self.MClass.mNode=='MetaClass_Test'
assert cmds.nodeType(self.MClass.mNode)=='network'
def test_unregisteredNodeType(self):
        #new handler will bail if you try to create a node with an unregistered nodeType
try:
r9Meta.MetaClass(name='new', nodeType='transform')
print 'Failed - generated new node with unregistered nodeType!'
assert False
except:
assert True
def test_functionCalls(self):
#select
cmds.select(cl=True)
self.MClass.select()
assert cmds.ls(sl=True)[0]=='MetaClass_Test'
#rename
self.MClass.rename('FooBar')
assert self.MClass.mNode=='FooBar'
self.MClass.select()
assert cmds.ls(sl=True)[0]=='FooBar'
#convert
new=r9Meta.convertMClassType(self.MClass,'MetaRig')
assert isinstance(new,r9Meta.MetaRig)
assert self.MClass.mClass=='MetaRig'
#delete
self.MClass.delete()
assert not cmds.objExists('MetaClass_Test')
#isReferenced ?? Why is this failing ??
assert not self.MClass.isReferenced()
def test_isValid(self):
assert self.MClass.isValid() # strange one, isValid fails if the mNode has no connections.... is this a good decision?
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
newMeta=r9Meta.MetaClass(cube1)
assert newMeta.isValid()
cmds.delete(newMeta.mNode)
assert not self.MClass.isValid()
def test_mNodeID(self):
assert self.MClass.mNodeID=='MetaClass_Test'
assert cmds.attributeQuery('mNodeID', node=self.MClass.mNode, exists=True)
assert self.MClass.hasAttr('mNodeID')
        #let's test standard wrapped handling
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube2=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube3=cmds.ls(cmds.polyCube()[0],l=True)[0]
cubeMeta=r9Meta.MetaClass(cube1)
assert cubeMeta.mNodeID=='pCube1'
#nest the dag path
cmds.parent(cube1, cube2)
cmds.parent(cube2, cube3)
assert cubeMeta.mNode=='|pCube3|pCube2|pCube1'
cubeMeta=r9Meta.MetaClass('|pCube3|pCube2')
assert cubeMeta.mNodeID=='pCube2'
def test_MObject_Handling(self):
#mNode is now handled via an MObject
assert self.MClass.mNode=='MetaClass_Test'
cmds.rename('MetaClass_Test','FooBar')
assert self.MClass.mNode=='FooBar'
def test_addChildMetaNode(self):
'''
add a new MetaNode as a child of self
'''
newMFacial=self.MClass.addChildMetaNode('MetaFacialRig',attr='Facial',nodeName='FacialNode')
assert isinstance(newMFacial,r9Meta.MetaFacialRig)
assert newMFacial.mNode=='FacialNode'
assert cmds.listConnections('%s.Facial' % self.MClass.mNode,c=True,p=True)==['MetaClass_Test.Facial',
'FacialNode.MetaClass_Test']
assert isinstance(self.MClass.Facial,r9Meta.MetaFacialRig)
assert self.MClass.Facial.mNode=='FacialNode'
def test_addChildMetaNode_ClassAttr(self):
'''
add a new MetaNode as a child of self, passing in class rather than a string
'''
newMFacial=self.MClass.addChildMetaNode(r9Meta.MetaFacialRig,attr='Facial',nodeName='FacialNode')
assert isinstance(newMFacial,r9Meta.MetaFacialRig)
assert newMFacial.mNode=='FacialNode'
assert cmds.listConnections('%s.Facial' % self.MClass.mNode,c=True,p=True)==['MetaClass_Test.Facial',
'FacialNode.MetaClass_Test']
assert isinstance(self.MClass.Facial,r9Meta.MetaFacialRig)
assert self.MClass.Facial.mNode=='FacialNode'
def test_connectionsTo_MetaNodes_child(self):
'''
Test how the code handles connections to other MetaNodes
'''
facialNode=r9Meta.MetaFacialRig(name='FacialNode')
self.MClass.connectChild(facialNode,'Facial')
assert self.MClass.Facial.mNode=='FacialNode'
assert isinstance(self.MClass.Facial, r9Meta.MetaFacialRig)
assert self.MClass.hasAttr('Facial')
assert not facialNode.hasAttr('Facial')
assert facialNode.hasAttr('MetaClass_Test')
assert cmds.listConnections('%s.Facial' % self.MClass.mNode,c=True,p=True)==['MetaClass_Test.Facial',
'FacialNode.MetaClass_Test']
#test disconnect call
self.MClass.disconnectChild(self.MClass.Facial, deleteSourcePlug=True, deleteDestPlug=True)
assert not self.MClass.hasAttr('Facial')
assert not facialNode.hasAttr('MetaClass_Test')
#test the additional attr flag
self.MClass.connectChild(facialNode,'parentAttr','childAttr')
assert cmds.listConnections('%s.parentAttr' % self.MClass.mNode,c=True,p=True)==['MetaClass_Test.parentAttr',
'FacialNode.childAttr']
self.MClass.disconnectChild(self.MClass.parentAttr, deleteSourcePlug=True, deleteDestPlug=True)
assert not self.MClass.hasAttr('parentAttr')
assert not facialNode.hasAttr('childAttr')
def test_connectionsTo_MetaNodes_children(self):
'''
COMPLEX! Test how the code handles connections to other MetaNodes via
connectChildren. Note that currently if the connections are between
MetaNodes then the messageAttr is INDEX managed
'''
master1 = r9Meta.MetaClass(name='master1')
master2 = r9Meta.MetaClass(name='master2')
child1 = r9Meta.MetaClass(name='child1')
child2 = r9Meta.MetaClass(name='child2')
cube = cmds.ls(cmds.polyCube()[0], l=True)[0]
#note mClass instance being passed in
master1.connectChildren([child1,child2,cube],'modules','puppet')
assert cmds.attributeQuery('modules', node=master1.mNode, m=True)
assert cmds.attributeQuery('modules', node=master1.mNode, im=True)
assert master1.modules==['|pCube1',child1,child2]
assert child1.puppet==[master1]
assert child2.puppet==[master1]
assert cmds.attributeQuery('puppet', node=cube, m=True)
assert not cmds.attributeQuery('puppet', node=cube, im=True)
assert cmds.listConnections('%s.puppet' % cube)==['master1']
#mClass mNode being passed in
master2.connectChildren([child1.mNode,child2.mNode,cube],'time','master',force=True)
assert master2.time==['|pCube1', child1, child2]
assert child1.master==[master2]
assert child2.master==[master2]
assert cmds.listConnections('%s.master' % cube)==['master2']
#check previous
assert master1.modules==['|pCube1',child1,child2]
assert child1.puppet==[master1]
assert child2.puppet==[master1]
assert cmds.listConnections('%s.puppet' % cube)==['master1']
master1.connectChildren([child1,child2],'time','master',cleanCurrent=True)
assert master1.time==[child1, child2]
assert sorted(child1.master,key=lambda x:x.mNode)== [master1, master2]
assert sorted(child2.master,key=lambda x:x.mNode)== [master1, master2]
#check previous
assert master2.time==['|pCube1', child1, child2]
assert cmds.listConnections('%s.master' % cube)==['master2']
assert master1.modules==['|pCube1', child1, child2]
assert child1.puppet==[master1]
assert child2.puppet==[master1]
assert cmds.listConnections('%s.puppet' % cube)==['master1']
try:
master1.connectChildren([child1],'time','master')
            assert False, "Shouldn't be able to connect the same node multiple times via the same attrs"
except:
assert True
master1.disconnectChild(child2,'time')
assert master1.time==[child1]
assert child2.master==[master2]
#check previous
assert master1.modules==['|pCube1',child1,child2]
assert master2.time==['|pCube1', child1, child2]
master1.disconnectChild(child1)
assert master1.modules==['|pCube1',child2]
assert not master1.hasAttr('time') # cleaned the plug
assert child1.master==[master2]
assert child1.hasAttr('puppet') # ???? FIXME: this is wrong, it should have been cleaned as it's now empty!
#assert not child1.puppet
#check previous
assert master2.time==['|pCube1', child1, child2]
#isChildNode test calls
assert master1.isChildNode(child2.mNode)
assert master1.isChildNode(child2.mNode,'modules','puppet')
assert master1.isChildNode(child2)
assert not master1.isChildNode(child1)
def test_connectionsTo_MayaNodes_Basic(self):
'''
Test how the code handles connections to standard MayaNodes
'''
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube2=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube3=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube4=cmds.ls(cmds.polyCube()[0],l=True)[0]
#add singular Child
self.MClass.connectChild(cube1,'Singluar')
assert self.MClass.Singluar==[cube1]
#add multiple Children
self.MClass.connectChildren([cube2,cube3],'Multiple')
assert sorted(self.MClass.Multiple)==[cube2,cube3]
#get the MetaNode back from the cube1 connection and retest
found=r9Meta.getConnectedMetaNodes(cube1)[0]
assert isinstance(found,r9Meta.MetaClass)
assert found.mNode=='MetaClass_Test'
assert found.mClass=='MetaClass'
assert sorted(found.Multiple)==[cube2,cube3]
#connect something else to Singluar - cleanCurrent=True by default so unhook cube1
self.MClass.connectChild(cube2,'Singluar')
assert self.MClass.Singluar==[cube2]
        assert not cmds.attributeQuery('MetaClass_Test',node=cube1,exists=True) # cleaned up after ourselves?
self.MClass.connectChildren([cube3,cube4],'Singluar')
assert sorted(self.MClass.Singluar)==[cube2,cube3,cube4]
#setAttr has cleanCurrent and force set to true so remove all current connections to this attr
self.MClass.Singluar=cube1
assert self.MClass.Singluar==[cube1]
try:
#still thinking about this....if the attr isn't a multi then
            #the __setattr__ will fail if you pass in a list of nodes
self.MClass.Singluar=[cube1,cube2,cube3]
except:
assert True
self.MClass.Multiple=[cube1,cube4]
assert sorted(self.MClass.Multiple)==[cube1,cube4]
def test_connections_called_from_wrappedMClass(self):
'''
lets try connects again and see how it behaves when the mClass calling
the code is just a wrapped standard Maya node
'''
loc1 = cmds.spaceLocator(name="boom")[0]
loc2 = cmds.spaceLocator(name="blah")[0]
loc3 = cmds.spaceLocator(name="weeh")[0]
boom = r9Meta.MetaClass("boom")
assert r9Meta.isMetaNode(boom)
boom.connectChild(loc2,"child","parent")
assert boom.child==['|blah']
boom.connectChild(loc3,"child","parent")
assert boom.child==['|weeh']
assert not cmds.attributeQuery('parent', node=loc2, exists=True)
def test_connectionsTo_MayaNodes_Complex(self):
'''
This is more to sanity check the connection management, when and how nodes get
removed from current connections, when multiples are allowed etc. Also check the
flags 'srcAttr' & 'force'
'''
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube2=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube3=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube4=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube5=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube6=cmds.ls(cmds.polyCube()[0],l=True)[0]
#test the fact that connectChildren allows multiples, cleanCurrent=False
self.MClass.connectChildren([cube1,cube2,cube3],'con1')
assert sorted(self.MClass.con1)==[cube1,cube2,cube3]
self.MClass.connectChildren([cube4,cube5],'con1')
assert sorted(self.MClass.con1)==[cube1,cube2,cube3,cube4,cube5]
#test the cleanCurrent flag, deletes all current connections before doing the hookup
self.MClass.connectChildren([cube6,cube2],'con1',cleanCurrent=True)
assert sorted(self.MClass.con1)==[cube2,cube6]
#unhook manager for cleanCurrent, no 'srcAttr' flag given so the attr on the node
#used to connect to mNode is the same default for all, mNode.mNodeID. This means
#the node can't be connected to the same mNode more than once by default
self.MClass.connectChild(cube1,'singleAttr1')
assert self.MClass.singleAttr1==[cube1]
self.MClass.connectChild(cube1,'singleAttr2')
assert self.MClass.singleAttr2==[cube1]
assert not self.MClass.singleAttr1==[cube1]
#test multiple connections to the same mNode by specifying the srcAttr used on the
#node itself, stops the node getting mNode.mNodeID attr which is the default
self.MClass.connectChild(cube1, 'singleAttr1', srcAttr='newScrAttr')
assert self.MClass.singleAttr1==[cube1]
assert self.MClass.singleAttr2==[cube1] # is still connected
assert cmds.listConnections('%s.singleAttr1' % self.MClass.mNode,c=True,p=True)==['MetaClass_Test.singleAttr1',
'pCube1.newScrAttr']
#force Flag test
try:
#should fail as cube2 is still connected to the mNode via the 'con1' attr
self.MClass.connectChild(cube2,'singleAttr',force=False)
assert False
except:
assert True
#force cube2's removal from previous attr
self.MClass.connectChild(cube2,'singleAttr',force=True)
assert self.MClass.singleAttr==[cube2]
#addAttr failure hook. cube2 already connected so addAttr now hard coded to fail with warning
try:
self.MClass.addAttr('newAttr',attrType='message', value=cube2)
assert False
except:
assert True
def test_forceFlagReturns(self):
'''
test the _forceAsMeta flag, modifying all returns to be instantiated metaClass objects
This is deep integration and needs careful testing
'''
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube2=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube3=cmds.ls(cmds.polyCube()[0],l=True)[0]
self.MClass.connectChild(cube1,'singleAttr1')
self.MClass.connectChild(cube2,'singleAttr2')
assert self.MClass.getChildren()==[cube1,cube2]
#throw the flag to force returns
self.MClass._forceAsMeta=True
assert issubclass(type(self.MClass.singleAttr1[0]), r9Meta.MetaClass)
assert self.MClass.singleAttr1[0].mNode==cube1
assert r9Meta.isMetaNode(self.MClass.singleAttr1[0])
nodes=self.MClass.getChildren()
for node in nodes:
assert r9Meta.isMetaNode(node)
assert node.getParentMetaNode()==self.MClass
assert cmds.nodeType(node.mNode)=='transform'
#connection handler
self.MClass.connectChild(r9Meta.MetaClass(cube3),'newConnect')
assert self.MClass.newConnect[0].mNode==cube3
assert self.MClass.isChildNode(cube3)
assert self.MClass.isChildNode(cube3,'newConnect')
def test_connectParent(self):
parent=r9Meta.MetaFacialRig(name='Facial')
self.MClass.connectParent(parent,'FacialNode')
assert parent.getChildMetaNodes()[0]==self.MClass
assert self.MClass.getParentMetaNode()==parent
assert parent.FacialNode==self.MClass
def test_attrLocking(self):
'''
deals with locking and managing locked attrs
'''
self.MClass.addAttr('newTest', 1.0)
assert not self.MClass.attrIsLocked('newTest')
cmds.setAttr('%s.newTest' % self.MClass.mNode, l=True)
assert self.MClass.attrIsLocked('newTest')
self.MClass.attrSetLocked('newTest',False)
assert not self.MClass.attrIsLocked('newTest')
self.MClass.attrSetLocked('newTest',True)
assert self.MClass.attrIsLocked('newTest')
#setAttr also uses this handler to force set locked attrs
self.MClass.newTest=4
assert self.MClass.newTest==4
def test_lockState(self):
assert not self.MClass.lockState
assert not cmds.lockNode(self.MClass.mNode, query=True)[0]
self.MClass.lockState=True
assert cmds.lockNode(self.MClass.mNode, query=True)[0]
def test_addAttrHandling(self):
self.MClass.addAttr('floatAttr', 1, l=True)
assert self.MClass.attrIsLocked('floatAttr')
self.MClass.addAttr('max', 1, max=10)
assert self.MClass.max==1
try:
self.MClass.max=30
except:
assert True
assert cmds.addAttr('%s.max' % self.MClass.mNode, q=True,max=True) == 10
def test_attributeHandling(self):
'''
This tests the standard attribute handing in the MetaClass.__setattr__
'''
node=self.MClass
#standard attribute handling
node.addAttr('stringTest', "this_is_a_string") # create a string attribute
node.addAttr('fltTest', 1.333) # create a float attribute
node.addAttr('fltTest2', 10.5, min=0,max=15) # create a float attribute with min/max
node.addAttr('intTest', 3) # create a int attribute
node.addAttr('boolTest', False) # create a bool attribute
node.addAttr('enum', attrType='enum', enumName='A:B:D:E:F') # create an enum attribute
node.addAttr('doubleTest', attrType='double3', value=(1.12,2.55,5.0))
node.addAttr('doubleTest2', attrType='double3', value=(1.0,2.0,10.0), min=1,max=15)
node.addAttr('doubleArray', attrType='doubleArray', value=(1.0,2.0,10.0))
node.addAttr('doubleArray2', attrType='doubleArray')
#create a string attr with JSON serialized data
testDict={'jsonFloat':1.05,'jsonInt':3,'jsonString':'string says hello','jsonBool':True}
node.addAttr('jsonTest',testDict)
#test the hasAttr call in the baseClass
assert node.hasAttr('stringTest')
assert node.hasAttr('fltTest')
assert node.hasAttr('fltTest2')
assert node.hasAttr('intTest')
assert node.hasAttr('boolTest')
assert node.hasAttr('enum')
assert node.hasAttr('jsonTest')
assert node.hasAttr('doubleTest') # compound3 so it adds 3 child attrs
assert node.hasAttr('doubleTestX')
assert node.hasAttr('doubleTestY')
assert node.hasAttr('doubleTestZ')
assert node.hasAttr('doubleTest2')
assert node.hasAttr('doubleArray')
assert node.hasAttr('doubleArray2')
#test the actual Maya node attributes
#------------------------------------
assert cmds.getAttr('%s.stringTest' % node.mNode, type=True)=='string'
assert cmds.getAttr('%s.fltTest' % node.mNode, type=True)=='double'
assert cmds.getAttr('%s.fltTest2' % node.mNode, type=True)=='double'
assert cmds.getAttr('%s.intTest' % node.mNode, type=True)=='long'
assert cmds.getAttr('%s.boolTest' % node.mNode, type=True)=='bool'
assert cmds.getAttr('%s.enum' % node.mNode, type=True)=='enum'
assert cmds.getAttr('%s.jsonTest' % node.mNode, type=True)=='string'
assert cmds.getAttr('%s.doubleTest' % node.mNode, type=True)=='double3'
assert cmds.getAttr('%s.doubleTestX' % node.mNode, type=True)=='double'
assert cmds.getAttr('%s.doubleTestY' % node.mNode, type=True)=='double'
assert cmds.getAttr('%s.doubleTestZ' % node.mNode, type=True)=='double'
assert cmds.getAttr('%s.doubleArray' % node.mNode, type=True)=='doubleArray'
assert cmds.getAttr('%s.doubleArray2' % node.mNode, type=True)=='doubleArray'
assert cmds.getAttr('%s.stringTest' % node.mNode)=='this_is_a_string'
assert cmds.getAttr('%s.fltTest' % node.mNode)==1.333
assert cmds.getAttr('%s.fltTest2' % node.mNode)==10.5
assert cmds.getAttr('%s.intTest' % node.mNode)==3
assert cmds.getAttr('%s.boolTest' % node.mNode)==False
assert cmds.getAttr('%s.enum' % node.mNode)==0
assert cmds.getAttr('%s.jsonTest' % node.mNode)=='{"jsonFloat": 1.05, "jsonBool": true, "jsonString": "string says hello", "jsonInt": 3}'
assert cmds.getAttr('%s.doubleTest' % node.mNode)==[(1.12,2.55,5.0)]
assert cmds.getAttr('%s.doubleTestX' % node.mNode)==1.12
assert cmds.getAttr('%s.doubleTestY' % node.mNode)==2.55
assert cmds.getAttr('%s.doubleTestZ' % node.mNode)==5.0
assert cmds.getAttr('%s.doubleArray' % node.mNode)==[1.0,2.0,10.0]
assert not cmds.getAttr('%s.doubleArray2' % node.mNode) # added with no initial value
assert cmds.attributeQuery('fltTest2',node=node.mNode, max=True)==[15.0]
assert cmds.attributeQuery('doubleTest2X',node=node.mNode, min=True)==[1.0]
assert cmds.attributeQuery('doubleTest2Y',node=node.mNode, max=True)==[15.0]
#now check the MetaClass __getattribute__ and __setattr__ calls
#--------------------------------------------------------------
assert node.intTest==3
node.intTest=10 # set back to the MayaNode
assert node.intTest==10
#float ========================
assert node.fltTest==1.333
node.fltTest=3.55 # set the float attr
assert node.fltTest==3.55
#float with min, max kws passed
try:
            #try setting the value past its max
node.fltTest2=22
assert False
except:
assert True
try:
            #try setting the value past its min
node.fltTest2=-5
assert False
except:
assert True
#string =======================
assert node.stringTest=='this_is_a_string'
node.stringTest="change the text" # set the string attr
assert node.stringTest=='change the text'
#bool =========================
assert node.boolTest==False
node.boolTest=True # set bool
assert node.boolTest==True
#enum =========================
assert node.enum==0
node.enum='B'
assert node.enum==1
node.enum=2
assert node.enum==2
#json string handlers =========
assert type(node.jsonTest)==dict
assert node.jsonTest=={'jsonFloat':1.05,'jsonInt':3,'jsonString':'string says hello','jsonBool':True}
assert node.jsonTest['jsonFloat']==1.05
assert node.jsonTest['jsonInt']==3
assert node.jsonTest['jsonString']=='string says hello'
assert node.jsonTest['jsonBool']==True
#double3 ======================
assert node.doubleTest==(1.12,2.55,5.0)
assert node.doubleTestX==1.12
assert node.doubleTestY==2.55
assert node.doubleTestZ==5.0
node.doubleTest=(2.0,44.2,22.0)
assert node.doubleTest==(2.0,44.2,22.0)
try:
            #try setting the value past its max
node.doubleTest2=(0,1,22)
assert False
except:
assert True
try:
            #try setting the value past its min
node.doubleTest2X=-10
assert False
except:
assert True
#doubleArray ======================
assert node.doubleArray==[1.0,2.0,10.0]
node.doubleArray=[20,5.5,3.1]
assert node.doubleArray==[20,5.5,3.1]
node.doubleArray=[]
assert not node.doubleArray
assert not node.doubleArray2
node.doubleArray2=[1.1,5,6,7,1.1]
assert node.doubleArray2==[1.1,5,6,7,1.1]
del(node.boolTest)
assert cmds.objExists(node.mNode)
assert not node.hasAttr('boolTest')
assert not cmds.attributeQuery('boolTest',node=node.mNode,exists=True)
def test_attributeHandlingMath(self):
'''
This tests the python attr handling with math args
'''
node=self.MClass
node.addAttr('fltTest', 1.5)
node.fltTest+=1
assert node.fltTest==2.5
node.fltTest-=1
assert node.fltTest == 1.5
node.fltTest*=2
assert node.fltTest == 3.0
def test_attributeHandling_MessageAttr(self):
'''
test the messageLink handling in the __setattr__ block and addAttr
this doesn't do any connectChild/children testing
'''
node=self.MClass
#make sure we collect LONG names for these as all wrappers deal with longName
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube2=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube3=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube4=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube5=cmds.ls(cmds.polyCube()[0],l=True)[0]
cube6=cmds.ls(cmds.polyCube()[0],l=True)[0]
node.addAttr('msgMultiTest', value=[cube1,cube2], attrType='message') # multi Message attr
node.addAttr('msgSingleTest', value=cube3, attrType='messageSimple') # non-multi message attr
assert node.hasAttr('msgMultiTest')
assert node.hasAttr('msgSingleTest')
assert cmds.getAttr('%s.msgMultiTest' % node.mNode, type=True)=='message'
assert cmds.getAttr('%s.msgSingleTest' % node.mNode, type=True)=='message'
assert cmds.attributeQuery('msgMultiTest',node=node.mNode, multi=True)==True
assert cmds.attributeQuery('msgSingleTest',node=node.mNode, multi=True)==False
#NOTE : cmds returns shortName, but all MetaClass attrs are always longName
assert cmds.listConnections('%s.msgMultiTest' % node.mNode,c=True,p=True)==['MetaClass_Test.msgMultiTest',
'pCube2.MetaClass_Test',
'MetaClass_Test.msgMultiTest',
'pCube1.MetaClass_Test']
assert cmds.listConnections('%s.msgSingleTest' % node.mNode,c=True,p=True)==['MetaClass_Test.msgSingleTest',
'pCube3.MetaClass_Test']
assert sorted(node.msgMultiTest)==[cube1,cube2]
assert node.msgSingleTest==[cube3]
#test the reconnect handler via the setAttr
node.msgMultiTest=[cube5,cube6]
assert sorted(node.msgMultiTest)==[cube5,cube6]
assert not cmds.attributeQuery('MetaClass_Test',node=cube1, exists=True) # disconnect should delete the old connection attr
assert cmds.listConnections('%s.msgMultiTest' % node.mNode,c=True,p=True)==['MetaClass_Test.msgMultiTest',
'pCube6.MetaClass_Test',
'MetaClass_Test.msgMultiTest',
'pCube5.MetaClass_Test']
node.msgMultiTest=[cube1,cube2,cube4,cube6]
assert sorted(node.msgMultiTest)==[cube1,cube2,cube4,cube6]
assert sorted(cmds.listConnections('%s.msgMultiTest' % node.mNode))==['pCube1','pCube2','pCube4','pCube6']
node.msgSingleTest=cube4
assert node.msgSingleTest==[cube4]
assert cmds.listConnections('%s.msgSingleTest' % node.mNode)==['pCube4'] # cmds returns a list
node.msgSingleTest=cube3
assert node.msgSingleTest==[cube3]
assert cmds.listConnections('%s.msgSingleTest' % node.mNode)==['pCube3'] # cmds returns a list
def test_longJsonDumps(self):
'''
Test the handling of LONG serialized Json data - testing the 16bit string attrTemplate handling
NOTE: if you set a string to over 32,767 chars and don't lock the attr once made, selecting
the textField in the AttributeEditor will truncate the data, hence this test!
'''
data= "x" * 40000
self.MClass.addAttr('json_test', data)
assert len(self.MClass.json_test)==40000
#save the file and reload to ensure the attr is consistent
cmds.file(rename=os.path.join(r9Setup.red9ModulePath(),'tests','testFiles','deleteMe.ma'))
cmds.file(save=True,type='mayaAscii')
cmds.file(new=True,f=True)
cmds.file(os.path.join(r9Setup.red9ModulePath(),'tests','testFiles','deleteMe.ma'),open=True,f=True)
mClass=r9Meta.getMetaNodes()[0]
assert len(mClass.json_test)
def test_castingStandardNode(self):
mLambert=r9Meta.MetaClass('lambert1')
#mLambert is just a Python MetaNode and doesn't exist as a MayaNode
mLambert.diffuse=0.5
assert '%0.2f' % cmds.getAttr('lambert1.diffuse')=='0.50'
mLambert.diffuse=0.77
assert '%0.2f' % cmds.getAttr('lambert1.diffuse')=='0.77'
mLambert.color=(0.5, 0.5, 0.5)
assert mLambert.color==(0.5,0.5,0.5)
assert cmds.getAttr('lambert1.color')==[(0.5, 0.5, 0.5)]
mLambert.color=(1.0, 0.0, 0.5)
print mLambert.color
assert mLambert.color==(1.0, 0.0, 0.5)
assert cmds.getAttr('lambert1.color')==[(1.0, 0.0, 0.5)]
def test_convertMClassType(self):
'''
test the class convert call, designed to mutate a given
metaClass to another and re-instantiate it
'''
# MClass Mutation
assert type(self.MClass)==r9Meta.MetaClass
converted=r9Meta.convertMClassType(self.MClass,'MetaRig')
assert type(converted)==r9Meta.MetaRig
assert converted.mClass=='MetaRig'
mNodes=r9Meta.getMetaNodes()
assert len(mNodes)==1
def test_referenceHandler(self):
#TODO: Fill Test
#referenceNode
#referencePath
#nameSpace
#nameSpaceFull
#nameSpaceFull(asList=True)
pass
def test_isSystemRoot(self):
#TODO: Fill Test
pass
def test_renameChildLinks(self):
#TODO: Fill Test
pass
class Test_Generic_SearchCalls():
'''
Basic Generic search calls at scene level
'''
def setup(self):
cmds.file(new=True,f=True)
self.metaA=r9Meta.MetaClass(name='MetaClass_Test')
self.metaB=r9Meta.MetaRig(name='MetaRig_Test')
self.metaC=r9Meta.MetaRigSupport(name='MetaRigSupport_Test')
self.metaD=r9Meta.MetaFacialRig(name='MetaFacialRig_Test')
self.metaE=r9Meta.MetaFacialRigSupport(name='MetaFacialRigSupport_Test')
def teardown(self):
self.setup()
def test_isMetaNode(self):
assert r9Meta.isMetaNode('MetaRig_Test')
assert r9Meta.isMetaNode(self.metaA)
assert r9Meta.isMetaNode('MetaRig_Test', mTypes=['MetaRig'])
assert r9Meta.isMetaNode('MetaRig_Test', mTypes='MetaRig')
assert not r9Meta.isMetaNode('MetaRig_Test', mTypes='MonkeyBollox')
assert not r9Meta.isMetaNode('MetaRig_Test', mTypes='MetaFacialRigSupport_Test')
assert r9Meta.isMetaNode('MetaRig_Test', mTypes=[r9Meta.MetaRig])
assert r9Meta.isMetaNode('MetaRig_Test', mTypes=r9Meta.MetaRig)
assert r9Meta.isMetaNode(self.metaB, mTypes=r9Meta.MetaRig)
assert not r9Meta.isMetaNode(self.metaB, mTypes=r9Meta.MetaRigSupport)
cube1=cmds.ls(cmds.polyCube()[0],l=True)[0]
assert not r9Meta.isMetaNode(cube1)
def test_isMetaNodeInherited(self):
assert r9Meta.isMetaNodeInherited('MetaFacialRig_Test','MetaRig')
assert r9Meta.isMetaNodeInherited(self.metaD, 'MetaRig')
assert r9Meta.isMetaNodeInherited('MetaFacialRig_Test','MetaClass')
assert not r9Meta.isMetaNodeInherited('MetaFacialRig_Test','MetaRigSubSystem')
assert r9Meta.isMetaNodeInherited('MetaFacialRig_Test',r9Meta.MetaRig)
assert r9Meta.isMetaNodeInherited(self.metaD, r9Meta.MetaRig)
assert r9Meta.isMetaNodeInherited('MetaFacialRig_Test',r9Meta.MetaClass)
assert not r9Meta.isMetaNodeInherited('MetaFacialRig_Test',r9Meta.MetaRigSubSystem)
def test_getMetaNodes(self):
nodes=sorted(r9Meta.getMetaNodes(),key=lambda x: x.mClass.upper())
assert [n.mClass for n in nodes]==['MetaClass','MetaFacialRig','MetaFacialRigSupport','MetaRig','MetaRigSupport']
def test_getMetaNodes_mTypes(self):
#mTypes test
nodes=sorted(r9Meta.getMetaNodes(mTypes=['MetaRig','MetaFacialRig']),key=lambda x: x.mClass.upper())
assert [n.mClass for n in nodes]==['MetaFacialRig','MetaRig']
nodes=r9Meta.getMetaNodes(dataType=None, mTypes=['MetaRig'])
assert nodes==['MetaRig_Test']
def test_getMetaNodes_mTypesAsClass(self):
#mTypes test passing in Class rather than string
nodes=sorted(r9Meta.getMetaNodes(mTypes=[r9Meta.MetaRig,r9Meta.MetaFacialRig]),key=lambda x: x.mClass.upper())
assert [n.mClass for n in nodes]==['MetaFacialRig','MetaRig']
nodes=r9Meta.getMetaNodes(dataType=None, mTypes=[r9Meta.MetaRig])
assert nodes==['MetaRig_Test']
def test_getMetaNodes_mInstances(self):
#mInstances tests
nodes=r9Meta.getMetaNodes(dataType=None, mInstances=['MetaRig'])
assert nodes==['MetaFacialRig_Test', 'MetaRig_Test']
nodes=r9Meta.getMetaNodes(mInstances=['MetaRig'])
assert [n.mNodeID for n in nodes]==['MetaFacialRig_Test', 'MetaRig_Test']
nodes=r9Meta.getMetaNodes(mInstances=['MetaClass'])
assert sorted([n.mNode for n in nodes])==['MetaClass_Test',
'MetaFacialRigSupport_Test',
'MetaFacialRig_Test',
'MetaRigSupport_Test',
'MetaRig_Test']
def test_getMetaNodes_mInstancesAsClass(self):
#mInstances tests passing in Class rather than string
nodes=r9Meta.getMetaNodes(dataType=None, mInstances=[r9Meta.MetaRig])
assert nodes==['MetaFacialRig_Test', 'MetaRig_Test']
nodes=r9Meta.getMetaNodes(mInstances=[r9Meta.MetaRig])
assert [n.mNodeID for n in nodes]==['MetaFacialRig_Test', 'MetaRig_Test']
nodes=r9Meta.getMetaNodes(mInstances=[r9Meta.MetaClass])
assert sorted([n.mNode for n in nodes])==['MetaClass_Test',
'MetaFacialRigSupport_Test',
'MetaFacialRig_Test',
'MetaRigSupport_Test',
'MetaRig_Test']
def test_getMetaNodes_mAttrs(self):
assert r9Meta.getMetaNodes(mAttrs='version=1')[0].mNodeID=='MetaRig_Test'
def test_getMetaNodes_mGrps(self):
#TODO: Fill Test
pass
class Test_MetaRig():
def setup(self):
cmds.file(os.path.join(r9Setup.red9ModulePath(),'tests','testFiles','MetaRig_baseTests.ma'),open=True,f=True)
self.mRig=self.addMetaRig()
def teardown(self):
self.setup()
def addMetaRig(self):
'''
Add a basic MetaRig network to the file including MetaSubSystems and MetaSupport
'''
mRig=r9Meta.MetaRig(name='RED_Rig')
        #Link the MainCtrl, this is used as the Root for some of the functions
mRig.addRigCtrl('World_Ctrl','Main', mirrorData={'side':'Centre', 'slot':1})
#Left Arm SubMeta Systems --------------------------
lArm= mRig.addMetaSubSystem('Arm', 'Left', nodeName='L_ArmSystem', attr='L_ArmSystem')
lArm.addRigCtrl('L_Wrist_Ctrl','L_Wrist', mirrorData={'side':'Left','slot':1})
lArm.addRigCtrl('L_Elbow_Ctrl','L_Elbow', mirrorData={'side':'Left','slot':2})
lArm.addRigCtrl('L_Clav_Ctrl','L_Clav', mirrorData={'side':'Left','slot':3})
#Left Arm Fingers ---------------------------------
lArm.addMetaSubSystem('Fingers','Left')
lArm.L_Fingers_System.addRigCtrl('Character1_LeftHandThumb1','ThumbRoot')
lArm.L_Fingers_System.addRigCtrl('Character1_LeftHandIndex1','IndexRoot')
lArm.L_Fingers_System.addRigCtrl('Character1_LeftHandMiddle1','MiddleRoot')
lArm.L_Fingers_System.addRigCtrl('Character1_LeftHandRing1','RingRoot')
lArm.L_Fingers_System.addRigCtrl('Character1_LeftHandPinky1','PinkyRoot')
#Left Leg SubMeta Systems --------------------------
lLeg= mRig.addMetaSubSystem('Leg', 'Left', nodeName='L_LegSystem')
lLeg.addRigCtrl('L_Foot_Ctrl','L_Foot', mirrorData={'side':'Left','slot':4})
lLeg.addRigCtrl('L_Knee_Ctrl', 'L_Knee', mirrorData={'side':'Left','slot':5})
#Right Arm SubMeta Systems --------------------------
rArm= mRig.addMetaSubSystem('Arm', 'Right', nodeName='R_ArmSystem', attr='R_ArmSystem')
rArm.addRigCtrl('R_Wrist_Ctrl','R_Wrist', mirrorData={'side':'Right','slot':1})
rArm.addRigCtrl('R_Elbow_Ctrl','R_Elbow', mirrorData={'side':'Right','slot':2})
rArm.addRigCtrl('R_Clav_Ctrl','R_Clav', mirrorData={'side':'Right', 'slot':3})
#Right Arm Fingers ----------------------------------
rArm.addMetaSubSystem('Fingers','Right')
rArm.R_Fingers_System.addRigCtrl('Character1_RightHandThumb1','ThumbRoot')
rArm.R_Fingers_System.addRigCtrl('Character1_RightHandIndex1','IndexRoot')
rArm.R_Fingers_System.addRigCtrl('Character1_RightHandMiddle1','MiddleRoot')
rArm.R_Fingers_System.addRigCtrl('Character1_RightHandRing1','RingRoot')
rArm.R_Fingers_System.addRigCtrl('Character1_RightHandPinky1','PinkyRoot')
#Right Leg SubMeta System --------------------------
rLeg= mRig.addMetaSubSystem('Leg', 'Right', nodeName='R_LegSystem', attr='R_LegSystem')
rLeg.addRigCtrl('R_Foot_Ctrl','R_Foot', mirrorData={'side':'Right','slot':4})
rLeg.addRigCtrl('R_Knee_Ctrl', 'R_Knee', mirrorData={'side':'Right','slot':5})
#Spine SubMeta System -------------------------------
spine= mRig.addMetaSubSystem('Spine', 'Centre', nodeName='SpineSystem', attr='SpineSystem')
spine.addRigCtrl('COG__Ctrl','Root', mirrorData={'side':'Centre','slot':2})
spine.addRigCtrl('Hips_Ctrl','Hips', mirrorData={'side':'Centre','slot':3})
spine.addRigCtrl('Chest_Ctrl','Chest', mirrorData={'side':'Centre','slot':4})
spine.addRigCtrl('Head_Ctrl','Head', mirrorData={'side':'Centre','slot':5})
#add SupportMeta Nodes ------------------------------
#this is a really basic demo, for the sake of this you could
#just wire all the support nodes to one MetaSupport, but this
#shows what you could do for really complex setups
lArm.addSupportMetaNode('L_ArmSupport')
lArm.L_ArmSupport.addSupportNode('ikHandle1','IKHandle')
rArm.addSupportMetaNode('R_ArmSupport')
rArm.R_ArmSupport.addSupportNode('ikHandle2','IKHandle')
lLeg.addSupportMetaNode('L_LegSupport')
lLeg.L_LegSupport.addSupportNode('ikHandle5','IKHandle')
rLeg.addSupportMetaNode('R_LegSupport')
rLeg.R_LegSupport.addSupportNode('ikHandle6','IKHandle')
spine.addSupportMetaNode('SpineSupport')
spine.SpineSupport.addSupportNode('ikHandle3','NeckIK')
spine.SpineSupport.addSupportNode('ikHandle4','SpineIK')
return mRig
def test_basicRigStructure(self):
mRig=r9Meta.getConnectedMetaSystemRoot('L_Wrist_Ctrl')
assert type(mRig)==r9Meta.MetaRig
assert mRig.mNode=='RED_Rig'
assert mRig.CTRL_Main[0]=='|World_Ctrl'
#test the Left Arm wires
assert type(mRig.L_ArmSystem)==r9Meta.MetaRigSubSystem
assert mRig.L_ArmSystem.mNode=='L_ArmSystem'
assert mRig.L_ArmSystem.systemType=='Arm'
assert mRig.L_ArmSystem.mirrorSide==1
assert mRig.L_ArmSystem.CTRL_L_Wrist[0]=='|World_Ctrl|L_Wrist_Ctrl'
assert mRig.L_ArmSystem.CTRL_L_Elbow[0]=='|World_Ctrl|COG__Ctrl|L_Elbow_Ctrl'
ctrl=r9Meta.MetaClass(mRig.L_ArmSystem.CTRL_L_Wrist[0])
assert ctrl.mirrorSide==1 # ?????? consistency of attrs on node and metaSubsystems!!!!!!!
assert ctrl.mirrorIndex==1
#test the Right Leg wires
assert type(mRig.R_LegSystem)==r9Meta.MetaRigSubSystem
assert r9Meta.isMetaNode('R_LegSystem')
assert mRig.R_LegSystem.mNode=='R_LegSystem'
assert mRig.R_LegSystem.systemType=='Leg'
assert mRig.R_LegSystem.mirrorSide==2
assert mRig.R_LegSystem.CTRL_R_Foot[0]=='|World_Ctrl|R_Foot_grp|R_Foot_Ctrl'
assert mRig.R_LegSystem.CTRL_R_Knee[0]=='|World_Ctrl|R_Knee_Ctrl'
ctrl=r9Meta.MetaClass(mRig.R_LegSystem.CTRL_R_Foot[0])
assert ctrl.mirrorSide==2 # ?????? consistency of attrs on node and metaSubsystems!!!!!!!
assert ctrl.mirrorIndex==4
#test the Left Leg wires
#:NOTE slight difference in the naming as we didn't pass in the attr when making the subSystem
assert type(mRig.L_Leg_System)==r9Meta.MetaRigSubSystem
assert r9Meta.isMetaNode('L_LegSystem')
assert mRig.L_Leg_System.mNode=='L_LegSystem'
assert mRig.L_Leg_System.systemType=='Leg'
assert mRig.L_Leg_System.mirrorSide==1
#test the Spine wires
assert type(mRig.SpineSystem)==r9Meta.MetaRigSubSystem
assert mRig.SpineSystem.mNode=='SpineSystem'
assert mRig.SpineSystem.systemType=='Spine'
assert mRig.SpineSystem.mirrorSide==0
assert mRig.SpineSystem.CTRL_Hips[0]=='|World_Ctrl|COG__Ctrl|Hips_Ctrl'
assert mRig.SpineSystem.CTRL_Chest[0]=='|World_Ctrl|COG__Ctrl|Chest_Ctrl'
ctrl=r9Meta.MetaClass(mRig.SpineSystem.CTRL_Chest[0])
assert ctrl.mirrorSide==0 # ?????? consistency of attrs on node and metaSubsystems!!!!!!!
assert ctrl.mirrorIndex==4
#test the MetaRigSupport nodes
assert type(mRig.L_ArmSystem.L_ArmSupport)==r9Meta.MetaRigSupport
assert mRig.L_ArmSystem.L_ArmSupport.mNode=='L_ArmSupport'
assert mRig.L_ArmSystem.L_ArmSupport.SUP_IKHandle[0]=='|World_Ctrl|L_Wrist_Ctrl|ikHandle1'
assert mRig.SpineSystem.SpineSupport.SUP_NeckIK[0]=='|World_Ctrl|COG__Ctrl|Chest_Ctrl|Head_grp|Head_Ctrl|ikHandle3'
assert mRig.SpineSystem.SpineSupport.SUP_SpineIK[0]=='|World_Ctrl|COG__Ctrl|Chest_Ctrl|ikHandle4'
def test_getRigCtrls(self):
assert self.mRig.getRigCtrls()==['|World_Ctrl']
assert self.mRig.getRigCtrls(walk=True)==['|World_Ctrl',
'|World_Ctrl|R_Foot_grp|R_Foot_Ctrl',
'|World_Ctrl|R_Knee_Ctrl',
'|World_Ctrl|L_Wrist_Ctrl',
'|World_Ctrl|COG__Ctrl|L_Elbow_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl|L_Clav_Ctrl',
'|World_Ctrl|R_Wrist_Ctrl',
'|World_Ctrl|COG__Ctrl|R_Elbow_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl|R_Clav_Ctrl',
'|World_Ctrl|COG__Ctrl',
'|World_Ctrl|COG__Ctrl|Hips_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl|Head_grp|Head_Ctrl',
'|World_Ctrl|L_Foot_grp|L_Foot_Ctrl',
'|World_Ctrl|L_Knee_Ctrl',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandThumb1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandIndex1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandMiddle1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandRing1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandPinky1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_LeftShoulder|Character1_LeftArm|Character1_LeftForeArm|Character1_LeftHand|Character1_LeftHandThumb1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_LeftShoulder|Character1_LeftArm|Character1_LeftForeArm|Character1_LeftHand|Character1_LeftHandIndex1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_LeftShoulder|Character1_LeftArm|Character1_LeftForeArm|Character1_LeftHand|Character1_LeftHandMiddle1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_LeftShoulder|Character1_LeftArm|Character1_LeftForeArm|Character1_LeftHand|Character1_LeftHandRing1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_LeftShoulder|Character1_LeftArm|Character1_LeftForeArm|Character1_LeftHand|Character1_LeftHandPinky1']
assert self.mRig.R_ArmSystem.getRigCtrls()==['|World_Ctrl|R_Wrist_Ctrl',
'|World_Ctrl|COG__Ctrl|R_Elbow_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl|R_Clav_Ctrl']
assert self.mRig.R_ArmSystem.getRigCtrls(walk=True)==['|World_Ctrl|R_Wrist_Ctrl',
'|World_Ctrl|COG__Ctrl|R_Elbow_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl|R_Clav_Ctrl',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandThumb1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandIndex1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandMiddle1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandRing1',
'|Character1_Pelvis|Character1_Spine|Character1_Spine2|Character1_RightShoulder|Character1_RightArm|Character1_RightForeArm|Character1_RightHand|Character1_RightHandPinky1']
assert self.mRig.R_ArmSystem.getChildren(walk=False)==['|World_Ctrl|R_Wrist_Ctrl',
'|World_Ctrl|COG__Ctrl|R_Elbow_Ctrl',
'|World_Ctrl|COG__Ctrl|Chest_Ctrl|R_Clav_Ctrl']
def test_getNodeConnectionMetaDataMap(self):
assert self.mRig.getNodeConnectionMetaDataMap('|World_Ctrl|L_Foot_grp|L_Foot_Ctrl') == {'metaAttr': u'CTRL_L_Foot', 'metaNodeID': u'L_LegSystem'}
def test_getNodeConnectionMetaDataMap_mTypes(self):
#TODO: Fill Test
#assert self.mRig.getNodeConnectionMetaDataMap(mTypes=??)
pass
def test_getNodeConnections(self):
assert self.mRig.L_Leg_System.getNodeConnections('|World_Ctrl|L_Foot_grp|L_Foot_Ctrl') == ['CTRL_L_Foot']
def test_getChildren_mAttrs(self):
#TODO: Fill Test
pass
def test_getChildren_asMap(self):
#TODO: Fill Test
pass
def test_getConnectedMetaNodes(self):
#TODO: Fill Test
pass
def test_getConnectedMetaNodes_mTypes(self):
#TODO: Fill Test
pass
def test_getConnectedMetaNodes_mInstances(self):
#TODO: Fill Test
pass
def test_getConnectedMetaNodes_mAttrs(self):
#TODO: Fill Test
pass
def test_getSkeletonRoots(self):
#TODO: Fill Test
pass
def test_addSupportNode(self):
#TODO: Fill Test
pass
def test_set_ctrlColour(self):
#TODO: Fill Test
pass
def test_mirrorDataHandling(self):
#TODO: Fill Test
#loadMirrorDataMap
#getMirrorData
#getMirror_opposites
#getMirror_ctrlSets
#mirror
pass
def test_poseCache(self):
#poseCacheStore
#poseCacheLoad
#poseCompare
pass
def test_nodeVisibility(self):
#nodeVisibility
#hideNodes
#unHideNodes
pass
def test_attrMap(self):
#loadAttrMap
#saveAttrMap
pass
class Test_SpeedTesting():
'''
    These are all set to fail so that we get the captured output that we can backtrack
'''
def setup(self):
cmds.file(new=True,f=True)
def test_standardNodes(self):
cubes=[]
for i in range(1,10000):
cubes.append(cmds.polyCube(name='a%s' % i)[0])
now = time.clock()
c = [r9Meta.MetaClass(p, autofill=False) for p in cubes]
print 'SPEED: Standard Wrapped Nodes : autofill=False: %s' % str(time.clock() - now)
print 'Timer should be around 4.26 secs on work PC'
print 'Timer should be around 3.28 secs on the Beast'
# verify against pymel, I know we're still a lot slower
now = time.clock()
c = pm.ls(cubes)
print 'Timer Pymel Reference : ', time.clock() - now
print '\n'
r9Meta.resetCache()
now = time.clock()
c = [r9Meta.MetaClass(p, autofill='all') for p in cubes]
print 'SPEED: Standard Wrapped Nodes : autofill=all : %s' % str(time.clock() - now)
print 'Timer should be around 14.6 secs on work PC'
print 'Timer should be around 10.48 secs on the Beast'
assert False
def test_MetaNodes(self):
nodes=[]
for i in range(1,10000):
nodes.append(r9Meta.MetaClass(name='a%s' % i).mNode)
r9Meta.resetCache()
now = time.clock()
c = [r9Meta.MetaClass(p, autofill=False) for p in nodes]
print 'SPEED: Meta Nodes : autofill=all : %s' % str(time.clock() - now)
print 'Timer should be around 8.5 secs on work PC'
print 'Timer should be around 6.14 secs on the Beast'
print '\n'
now = time.clock()
c = [r9Meta.MetaClass(p, autofill=False) for p in nodes]
print 'SPEED: Meta Nodes from Cache : %s' % str(time.clock() - now)
print 'Timer should be around 8.5 secs on work PC'
print 'Timer should be around 6.15 secs on the Beast'
assert False
class Test_MetaNetworks():
'''
Test the network walking and get commands on a larger network
'''
def setup(self):
cmds.file(os.path.join(r9Setup.red9ModulePath(),'tests','testFiles','Meta_Network_WalkTest.ma'),open=True,f=True)
self.mRig=r9Meta.getMetaNodes(mTypes='MetaRig')[0]
def teardown(self):
self.setup()
def buildNetwork(self):
'''
code that built the test scene above
'''
mRig=r9Meta.MetaRig()
mRig.addMetaSubSystem('Spine','Centre')
mRig.C_Spine_System.addMetaSubSystem('Arm','Left')
mRig.C_Spine_System.L_Arm_System.addMetaSubSystem('other','Left')
mRig.C_Spine_System.L_Arm_System.addSupportMetaNode('L_Arm_Support')
mRig.C_Spine_System.L_Arm_System.L_other_System.addMetaSubSystem('Fingers','Left')
mRig.C_Spine_System.addMetaSubSystem('Arm','Right')
mRig.C_Spine_System.R_Arm_System.addMetaSubSystem('other','Right')
mRig.C_Spine_System.R_Arm_System.addSupportMetaNode('R_Arm_Support')
mRig.C_Spine_System.R_Arm_System.R_other_System.addMetaSubSystem('Fingers','Right')
mRig.addMetaSubSystem('Leg','Right')
mRig.R_Leg_System.addMetaSubSystem('Toes','Right')
mRig.addMetaSubSystem('Leg','Left')
mRig.R_Leg_System.addMetaSubSystem('Toes','Left')
def test_getChildMetaNodes(self):
'''
note that the order of this is important as the return is
managed by the depth of the connections
'''
nodes=self.mRig.getChildMetaNodes(walk=True)
assert [node.mNodeID for node in nodes]==['R_Leg_System',
'L_Leg_System',
'C_Spine_System',
'L_Toes_System',
'L_Arm_System',
'R_Toes_System',
'R_Arm_System',
'L_other_System',
'R_other_System',
'L_Arm_Support',
'R_Arm_Support',
'R_Fingers_System',
'L_Fingers_System']
nodes=self.mRig.C_Spine_System.getChildMetaNodes(walk=True)
assert [node.mNodeID for node in nodes]==['R_Arm_System',
'L_Arm_System',
'R_other_System',
'L_other_System',
'R_Arm_Support',
'L_Arm_Support',
'L_Fingers_System',
'R_Fingers_System']
nodes=self.mRig.C_Spine_System.getChildMetaNodes(walk=False)
assert [node.mNodeID for node in nodes]==['R_Arm_System','L_Arm_System']
def test_getParentSystems(self):
assert r9Meta.getConnectedMetaSystemRoot('L_Fingers_System').mNode=='MetaRig'
assert r9Meta.getConnectedMetaSystemRoot('L_Toes_System').mNode=='MetaRig'
assert self.mRig.C_Spine_System.L_Arm_System.getParentMetaNode().mNodeID=='C_Spine_System'
assert self.mRig.C_Spine_System.L_Arm_System.L_Arm_Support.getParentMetaNode().mNodeID=='L_Arm_System'
def test_getConnectedMetaSystemRoot_args(self):
#add in a few additional systemRoots and check the filter args
assert r9Meta.getConnectedMetaSystemRoot('L_Fingers_System').mNode=='MetaRig'
r_legSys=r9Meta.getConnectedMetaNodes('MetaRig')[0]
l_legSys=r9Meta.getConnectedMetaNodes('MetaRig')[2]
#add 2 new mNodes that are effectively now BOTH additional parents to the system
newparent1=r9Meta.MetaClass(name='exportNode')
newparent2=r9Meta.MetaFacialRig(name='facial')
r_legSys.connectParent(newparent1,attr='ExportRoot')
l_legSys.connectParent(newparent2,attr='Facial')
assert r9Meta.getConnectedMetaSystemRoot('L_Leg_System').mClass=='MetaRig'
assert r9Meta.getConnectedMetaSystemRoot('L_Leg_System', mTypes=['MetaFacialRig']).mClass=='MetaFacialRig'
assert r9Meta.getConnectedMetaSystemRoot('R_Fingers_System').mClass=='MetaRig'
assert not r9Meta.getConnectedMetaSystemRoot('R_Fingers_System', ignoreTypes='MetaRig')
assert r9Meta.getConnectedMetaSystemRoot('R_Toes_System').mClass=='MetaClass'
assert r9Meta.getConnectedMetaSystemRoot('R_Toes_System', ignoreTypes=['MetaClass']).mClass=='MetaRig'
def test_getChildMetaNodes_mAttrs(self):
#TODO: this code needs fixing and then testing!!!!!!
#self.mRig.getChildMetaNodes(walk=True,mAttrs='ddddddddddddd')
pass
def test_getMetaNodes_mAttrs(self):
mNodes=r9Meta.getMetaNodes(mAttrs='mirrorSide=1')
assert sorted([node.mNodeID for node in mNodes])==['L_Arm_System',
'L_Fingers_System',
'L_Leg_System',
'L_Toes_System',
'L_other_System']
mNodes=r9Meta.getMetaNodes(mAttrs=['mirrorSide=1','systemType=Arm'])
assert sorted([node.mNodeID for node in mNodes])==['L_Arm_System']
mNodes=r9Meta.getMetaNodes(mAttrs=['systemType=Leg'])
assert sorted([node.mNodeID for node in mNodes])== ['L_Leg_System', 'R_Leg_System']
|
|
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Glance"""
from collections import abc
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import opts
from oslo_policy import policy
from glance.common import exception
import glance.domain.proxy
from glance.i18n import _, _LW
from glance import policies
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_ENFORCER = None
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
class Enforcer(policy.Enforcer):
"""Responsible for loading and enforcing rules"""
def __init__(self, suppress_deprecation_warnings=False):
"""Init an policy Enforcer.
:param suppress_deprecation_warnings: Whether to suppress the
deprecation warnings.
"""
super(Enforcer, self).__init__(CONF, use_conf=True, overwrite=False)
# NOTE(gmann): Explicitly disable the warnings for policies
# changing their default check_str. For the new RBAC, all the policy
# defaults have been changed, and the warning for each policy started
# filling the logs and hitting log limits for various tools.
# Once we move to a new-defaults-only world we can enable these
# warnings again.
self.suppress_default_change_warnings = True
if suppress_deprecation_warnings:
self.suppress_deprecation_warnings = True
self.register_defaults(policies.list_rules())
if CONF.enforce_secure_rbac and CONF.oslo_policy.enforce_new_defaults:
LOG.warning(_LW(
"Deploying glance with secure RBAC personas enabled via "
"`glance-api.conf [DEFAULT] enforce_secure_rbac=True` and "
"`glance-api.conf [oslo_policy] enforce_new_defaults=True` "
"is marked as EXPERIMENTAL in Wallaby. The status of this "
"feature will graduate to SUPPORTED as glance adopts more "
"personas, specifically for system-scope."
))
def add_rules(self, rules):
"""Add new rules to the Rules object"""
self.set_rules(rules, overwrite=False, use_conf=self.use_conf)
def enforce(self, context, action, target, registered=True):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises: `glance.common.exception.Forbidden`
:returns: A non-False value if access is allowed.
"""
if registered and action not in self.registered_rules:
raise policy.PolicyNotRegistered(action)
try:
return super(Enforcer, self).enforce(action, target,
context,
do_raise=True,
exc=exception.Forbidden,
action=action)
except policy.InvalidScope:
raise exception.Forbidden(action=action)
def check(self, context, action, target, registered=True):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:returns: A non-False value if access is allowed.
"""
if registered and action not in self.registered_rules:
raise policy.PolicyNotRegistered(action)
return super(Enforcer, self).enforce(action,
target,
context)
def check_is_admin(self, context):
"""Check if the given context is associated with an admin role,
as defined via the 'context_is_admin' RBAC rule.
:param context: Glance request context
:returns: A non-False value if context role is admin.
"""
return self.check(context, 'context_is_admin', context.to_dict())
def get_enforcer():
CONF([], project='glance')
global _ENFORCER
if _ENFORCER is None:
_ENFORCER = Enforcer()
return _ENFORCER
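# A minimal usage sketch (illustrative only; `ctx` stands in for a real
# Glance request context):
#
#   enforcer = get_enforcer()
#   enforcer.enforce(ctx, 'get_image', {'project_id': ctx.project_id})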
class ImageRepoProxy(glance.domain.proxy.Repo):
def __init__(self, image_repo, context, policy):
self.context = context
self.policy = policy
self.image_repo = image_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
image = super(ImageRepoProxy, self).get(image_id)
target = self._build_image_target(image)
try:
self.policy.enforce(self.context, 'get_image', target)
except exception.Forbidden:
# NOTE (abhishekk): Returning 404 Not Found as the
# image is outside of this user's project
msg = _("No image found with ID %s") % image_id
raise exception.NotFound(msg)
return image
def _build_image_target(self, image):
"""Build a policy enforcement target for an image.
:param image: An instance of `glance.domain.Image`
:returns: a dictionary representing the image for policy enforcement
"""
target = dict(ImageTarget(image))
target['project_id'] = image.owner
# We do this so that members of the image can pass the policy for
# getting an image shared with their project. An alternative would be
# to update the image owner, or project_id, to match the member ID,
# tricking the policy enforcer into thinking image members are actually
# image owners. But, that feels less clear without understanding the
# code that makes that assumption, especially for operators reading
# check strings. Using this approach forces the check_str to be more
# descriptive.
members = self.image_repo.db_api.image_member_find(
self.context, image_id=image.image_id)
# FIXME(lbragstad): Remove this if statement if/when oslo.policy
# supports lists of target attributes via substitution, allowing us to
# do something like:
#
# target['member_ids'] = set(m['member'] for m in members)
for member in members:
if member['member'] == self.context.project_id:
target['member_id'] = member['member']
break
return target
def list(self, *args, **kwargs):
# FIXME(lbragstad): This is a hack to get policy to pass because we
# don't have a reasonable target to use for all images. We set the
# target project_id to the context project_id, which effectively
# ensures the context project_id matches itself in policy enforcement.
#
# A more accurate and cleaner way to implement this, and filtering,
# would be to query all images from the database, build a target for
# each image, and then iterate over each image and call policy
# enforcement. If the user passes policy enforcement, append the image
# to the list of filtered images. If not, the image should be removed
# from the list of images returned to the user.
target = {'project_id': self.context.project_id}
self.policy.enforce(self.context, 'get_images', target)
return super(ImageRepoProxy, self).list(*args, **kwargs)
def save(self, image, from_state=None):
target = dict(image.target)
self.policy.enforce(self.context, 'modify_image', target)
return super(ImageRepoProxy, self).save(image, from_state=from_state)
def add(self, image):
target = dict(image.target)
self.policy.enforce(self.context, 'add_image', target)
return super(ImageRepoProxy, self).add(image)
def _enforce_image_visibility(policy, context, visibility, target):
if visibility == 'public':
policy.enforce(context, 'publicize_image', target)
elif visibility == 'community':
policy.enforce(context, 'communitize_image', target)
class ImageProxy(glance.domain.proxy.Image):
def __init__(self, image, context, policy):
self.image = image
self.target = ImageTarget(image)
self.context = context
self.policy = policy
super(ImageProxy, self).__init__(image)
@property
def visibility(self):
return self.image.visibility
@visibility.setter
def visibility(self, value):
target = dict(self.target)
_enforce_image_visibility(self.policy, self.context, value, target)
self.image.visibility = value
@property
def locations(self):
return ImageLocationsProxy(self.image.locations,
self.context, self.policy)
@locations.setter
def locations(self, value):
if not isinstance(value, (list, ImageLocationsProxy)):
raise exception.Invalid(_('Invalid locations: %s') % value)
self.policy.enforce(self.context, 'set_image_location', self.target)
new_locations = list(value)
if (set([loc['url'] for loc in self.image.locations]) -
set([loc['url'] for loc in new_locations])):
self.policy.enforce(self.context, 'delete_image_location',
self.target)
self.image.locations = new_locations
def delete(self):
target = dict(self.target)
self.policy.enforce(self.context, 'delete_image', target)
return self.image.delete()
def deactivate(self):
LOG.debug('Attempting deactivate')
target = dict(ImageTarget(self.image))
self.policy.enforce(self.context, 'deactivate', target=target)
LOG.debug('Deactivate allowed, continue')
self.image.deactivate()
def reactivate(self):
LOG.debug('Attempting reactivate')
target = dict(ImageTarget(self.image))
self.policy.enforce(self.context, 'reactivate', target=target)
LOG.debug('Reactivate allowed, continue')
self.image.reactivate()
def get_data(self, *args, **kwargs):
target = dict(ImageTarget(self.image))
self.policy.enforce(self.context, 'download_image', target)
return self.image.get_data(*args, **kwargs)
def set_data(self, *args, **kwargs):
return self.image.set_data(*args, **kwargs)
class ImageMemberProxy(glance.domain.proxy.ImageMember):
def __init__(self, image_member, context, policy):
super(ImageMemberProxy, self).__init__(image_member)
self.image_member = image_member
self.context = context
self.policy = policy
class ImageFactoryProxy(glance.domain.proxy.ImageFactory):
def __init__(self, image_factory, context, policy):
self.image_factory = image_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(ImageFactoryProxy, self).__init__(image_factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
def new_image(self, **kwargs):
# If we reversed the order of this method and did the policy
# enforcement on the way out instead of before we build the image
# target reference, we could use the actual image as a target instead
# of building a faux target with one attribute.
target = {}
target['project_id'] = kwargs.get('owner', None)
_enforce_image_visibility(self.policy, self.context,
kwargs.get('visibility'), target)
return super(ImageFactoryProxy, self).new_image(**kwargs)
class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory):
def __init__(self, member_factory, context, policy):
super(ImageMemberFactoryProxy, self).__init__(
member_factory,
proxy_class=ImageMemberProxy,
proxy_kwargs={'context': context, 'policy': policy})
class ImageMemberRepoProxy(glance.domain.proxy.Repo):
def __init__(self, member_repo, image, context, policy):
self.member_repo = member_repo
self.image = image
self.target = ImageTarget(image)
self.context = context
self.policy = policy
def add(self, member):
target = dict(self.target)
target['project_id'] = self.context.project_id
self.policy.enforce(self.context, 'add_member', target)
self.member_repo.add(member)
def get(self, member_id):
# NOTE(lbragstad): We set the project_id of the target to be the
# project_id of the context object, which is effectively a no-op
# because we're checking the context.project_id matches the
# context.project_id. This is a bandaid to allow project-members and
# project-readers to view shared images owned by their project, or to
# view images shared with them by another project. Glance's database
# layer filters the images by ownership and membership, based on the
# context object and administrative checks. If or when that code is
# pulled into a higher level and if we have a list of members for an
# image, we can write a more accurate target.
target = dict(self.target)
# We can't set the project_id as the image project_id because that
# wouldn't allow image members to pass the policy check. We need the
# list of image members to build an accurate target.
target['project_id'] = self.context.project_id
self.policy.enforce(self.context, 'get_member', target)
return self.member_repo.get(member_id)
def save(self, member, from_state=None):
target = dict(self.target)
target['project_id'] = self.context.project_id
self.policy.enforce(self.context, 'modify_member', target)
self.member_repo.save(member, from_state=from_state)
def list(self, *args, **kwargs):
target = dict(self.target)
target['project_id'] = self.context.project_id
self.policy.enforce(self.context, 'get_members', target)
return self.member_repo.list(*args, **kwargs)
def remove(self, member):
target = dict(self.target)
target['project_id'] = self.context.project_id
self.policy.enforce(self.context, 'delete_member', target)
self.member_repo.remove(member)
class ImageLocationsProxy(object):
__hash__ = None
def __init__(self, locations, context, policy):
self.locations = locations
self.context = context
self.policy = policy
def __copy__(self):
return type(self)(self.locations, self.context, self.policy)
def __deepcopy__(self, memo):
# NOTE(zhiyan): Only copy location entries, others can be reused.
return type(self)(copy.deepcopy(self.locations, memo),
self.context, self.policy)
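# _get_checker builds thin wrappers around the underlying list's methods:
# each wrapper enforces the named policy action (get/set/delete image
# location) against the caller's project before delegating to the wrapped
# method.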
def _get_checker(action, func_name):
def _checker(self, *args, **kwargs):
target = {}
if self.context.project_id:
target['project_id'] = self.context.project_id
self.policy.enforce(self.context, action, target)
method = getattr(self.locations, func_name)
return method(*args, **kwargs)
return _checker
count = _get_checker('get_image_location', 'count')
index = _get_checker('get_image_location', 'index')
__getitem__ = _get_checker('get_image_location', '__getitem__')
__contains__ = _get_checker('get_image_location', '__contains__')
__len__ = _get_checker('get_image_location', '__len__')
__cast = _get_checker('get_image_location', '__cast')
__cmp__ = _get_checker('get_image_location', '__cmp__')
__iter__ = _get_checker('get_image_location', '__iter__')
append = _get_checker('set_image_location', 'append')
extend = _get_checker('set_image_location', 'extend')
insert = _get_checker('set_image_location', 'insert')
reverse = _get_checker('set_image_location', 'reverse')
__iadd__ = _get_checker('set_image_location', '__iadd__')
__setitem__ = _get_checker('set_image_location', '__setitem__')
pop = _get_checker('delete_image_location', 'pop')
remove = _get_checker('delete_image_location', 'remove')
__delitem__ = _get_checker('delete_image_location', '__delitem__')
__delslice__ = _get_checker('delete_image_location', '__delslice__')
del _get_checker
class TaskProxy(glance.domain.proxy.Task):
def __init__(self, task, context, policy):
self.task = task
self.context = context
self.policy = policy
super(TaskProxy, self).__init__(task)
class TaskStubProxy(glance.domain.proxy.TaskStub):
def __init__(self, task_stub, context, policy):
self.task_stub = task_stub
self.context = context
self.policy = policy
super(TaskStubProxy, self).__init__(task_stub)
class TaskRepoProxy(glance.domain.proxy.TaskRepo):
def __init__(self, task_repo, context, task_policy):
self.context = context
self.policy = task_policy
self.task_repo = task_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskRepoProxy,
self).__init__(task_repo,
task_proxy_class=TaskProxy,
task_proxy_kwargs=proxy_kwargs)
# TODO(lbragstad): Move this to the tasks api itself
def get(self, task_id):
self.policy.enforce(self.context, 'get_task', {})
return super(TaskRepoProxy, self).get(task_id)
# TODO(lbragstad): Move this to the tasks api itself
def add(self, task):
self.policy.enforce(self.context, 'add_task', {})
super(TaskRepoProxy, self).add(task)
# TODO(lbragstad): Remove this after Xena
def save(self, task):
self.policy.enforce(self.context, 'modify_task', {})
super(TaskRepoProxy, self).save(task)
class TaskStubRepoProxy(glance.domain.proxy.TaskStubRepo):
def __init__(self, task_stub_repo, context, task_policy):
self.context = context
self.policy = task_policy
self.task_stub_repo = task_stub_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskStubRepoProxy,
self).__init__(task_stub_repo,
task_stub_proxy_class=TaskStubProxy,
task_stub_proxy_kwargs=proxy_kwargs)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_tasks', {})
return super(TaskStubRepoProxy, self).list(*args, **kwargs)
class TaskFactoryProxy(glance.domain.proxy.TaskFactory):
def __init__(self, task_factory, context, policy):
self.task_factory = task_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskFactoryProxy, self).__init__(
task_factory,
task_proxy_class=TaskProxy,
task_proxy_kwargs=proxy_kwargs)
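# ImageTarget adapts an image domain object to the read-only mapping interface
# that oslo.policy expects for target dictionaries: it exposes the ImageProxy
# attributes, any extra_properties, and a 'project_id' alias for the owner.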
class ImageTarget(abc.Mapping):
SENTINEL = object()
def __init__(self, target):
"""Initialize the object
:param target: Object being targeted
"""
self.target = target
self._target_keys = [k for k in dir(ImageProxy)
if not k.startswith('__')
# NOTE(lbragstad): The locations attribute is an
# instance of ImageLocationsProxy, which isn't
# serialized into anything oslo.policy can use. If
# we need to use locations in policies, we need to
# modify how we represent those location objects
# before we call enforcement with target
# information. Omitting for now until that is a
# necessity.
if not k == 'locations'
if not callable(getattr(ImageProxy, k))]
def __getitem__(self, key):
"""Return the value of 'key' from the target.
If the target has the attribute 'key', return it.
:param key: value to retrieve
"""
key = self.key_transforms(key)
value = getattr(self.target, key, self.SENTINEL)
if value is self.SENTINEL:
extra_properties = getattr(self.target, 'extra_properties', None)
if extra_properties is not None:
value = extra_properties[key]
else:
value = None
return value
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __len__(self):
length = len(self._target_keys)
length += len(getattr(self.target, 'extra_properties', {}))
return length
def __iter__(self):
for key in self._target_keys:
yield key
for key in getattr(self.target, 'extra_properties', {}).keys():
yield key
for alias in ['project_id']:
yield alias
def key_transforms(self, key):
transforms = {
'id': 'image_id',
'project_id': 'owner',
'member_id': 'member',
}
return transforms.get(key, key)
# Metadef Namespace classes
class MetadefNamespaceProxy(glance.domain.proxy.MetadefNamespace):
def __init__(self, namespace, context, policy):
self.namespace_input = namespace
self.context = context
self.policy = policy
super(MetadefNamespaceProxy, self).__init__(namespace)
def delete(self):
self.policy.enforce(self.context, 'delete_metadef_namespace', {})
return super(MetadefNamespaceProxy, self).delete()
class MetadefNamespaceRepoProxy(glance.domain.proxy.MetadefNamespaceRepo):
def __init__(self, namespace_repo, context, namespace_policy):
self.context = context
self.policy = namespace_policy
self.namespace_repo = namespace_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefNamespaceRepoProxy,
self).__init__(namespace_repo,
namespace_proxy_class=MetadefNamespaceProxy,
namespace_proxy_kwargs=proxy_kwargs)
def get(self, namespace):
self.policy.enforce(self.context, 'get_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).get(namespace)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_namespaces', {})
return super(MetadefNamespaceRepoProxy, self).list(*args, **kwargs)
def save(self, namespace):
self.policy.enforce(self.context, 'modify_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).save(namespace)
def add(self, namespace):
self.policy.enforce(self.context, 'add_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).add(namespace)
def remove(self, namespace):
self.policy.enforce(self.context, 'delete_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).remove(namespace)
def remove_tags(self, namespace):
self.policy.enforce(self.context, 'delete_metadef_tags', {})
return super(MetadefNamespaceRepoProxy, self).remove_tags(namespace)
class MetadefNamespaceFactoryProxy(
glance.domain.proxy.MetadefNamespaceFactory):
def __init__(self, meta_namespace_factory, context, policy):
self.meta_namespace_factory = meta_namespace_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefNamespaceFactoryProxy, self).__init__(
meta_namespace_factory,
meta_namespace_proxy_class=MetadefNamespaceProxy,
meta_namespace_proxy_kwargs=proxy_kwargs)
# Metadef Object classes
class MetadefObjectProxy(glance.domain.proxy.MetadefObject):
def __init__(self, meta_object, context, policy):
self.meta_object = meta_object
self.context = context
self.policy = policy
super(MetadefObjectProxy, self).__init__(meta_object)
def delete(self):
self.policy.enforce(self.context, 'delete_metadef_object', {})
return super(MetadefObjectProxy, self).delete()
class MetadefObjectRepoProxy(glance.domain.proxy.MetadefObjectRepo):
def __init__(self, object_repo, context, object_policy):
self.context = context
self.policy = object_policy
self.object_repo = object_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefObjectRepoProxy,
self).__init__(object_repo,
object_proxy_class=MetadefObjectProxy,
object_proxy_kwargs=proxy_kwargs)
def get(self, namespace, object_name):
self.policy.enforce(self.context, 'get_metadef_object', {})
return super(MetadefObjectRepoProxy, self).get(namespace, object_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_objects', {})
return super(MetadefObjectRepoProxy, self).list(*args, **kwargs)
def save(self, meta_object):
self.policy.enforce(self.context, 'modify_metadef_object', {})
return super(MetadefObjectRepoProxy, self).save(meta_object)
def add(self, meta_object):
self.policy.enforce(self.context, 'add_metadef_object', {})
return super(MetadefObjectRepoProxy, self).add(meta_object)
def remove(self, meta_object):
self.policy.enforce(self.context, 'delete_metadef_object', {})
return super(MetadefObjectRepoProxy, self).remove(meta_object)
class MetadefObjectFactoryProxy(glance.domain.proxy.MetadefObjectFactory):
def __init__(self, meta_object_factory, context, policy):
self.meta_object_factory = meta_object_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefObjectFactoryProxy, self).__init__(
meta_object_factory,
meta_object_proxy_class=MetadefObjectProxy,
meta_object_proxy_kwargs=proxy_kwargs)
# Metadef ResourceType classes
class MetadefResourceTypeProxy(glance.domain.proxy.MetadefResourceType):
def __init__(self, meta_resource_type, context, policy):
self.meta_resource_type = meta_resource_type
self.context = context
self.policy = policy
super(MetadefResourceTypeProxy, self).__init__(meta_resource_type)
def delete(self):
self.policy.enforce(self.context,
'remove_metadef_resource_type_association', {})
return super(MetadefResourceTypeProxy, self).delete()
class MetadefResourceTypeRepoProxy(
glance.domain.proxy.MetadefResourceTypeRepo):
def __init__(self, resource_type_repo, context, resource_type_policy):
self.context = context
self.policy = resource_type_policy
self.resource_type_repo = resource_type_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefResourceTypeRepoProxy, self).__init__(
resource_type_repo,
resource_type_proxy_class=MetadefResourceTypeProxy,
resource_type_proxy_kwargs=proxy_kwargs)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'list_metadef_resource_types', {})
return super(MetadefResourceTypeRepoProxy, self).list(*args, **kwargs)
def get(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_resource_type', {})
return super(MetadefResourceTypeRepoProxy, self).get(*args, **kwargs)
def add(self, resource_type):
self.policy.enforce(self.context,
'add_metadef_resource_type_association', {})
return super(MetadefResourceTypeRepoProxy, self).add(resource_type)
def remove(self, *args, **kwargs):
self.policy.enforce(self.context,
'remove_metadef_resource_type_association', {})
return super(MetadefResourceTypeRepoProxy,
self).remove(*args, **kwargs)
class MetadefResourceTypeFactoryProxy(
glance.domain.proxy.MetadefResourceTypeFactory):
def __init__(self, resource_type_factory, context, policy):
self.resource_type_factory = resource_type_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefResourceTypeFactoryProxy, self).__init__(
resource_type_factory,
resource_type_proxy_class=MetadefResourceTypeProxy,
resource_type_proxy_kwargs=proxy_kwargs)
# Metadef namespace properties classes
class MetadefPropertyProxy(glance.domain.proxy.MetadefProperty):
def __init__(self, namespace_property, context, policy):
self.namespace_property = namespace_property
self.context = context
self.policy = policy
super(MetadefPropertyProxy, self).__init__(namespace_property)
def delete(self):
self.policy.enforce(self.context, 'remove_metadef_property', {})
return super(MetadefPropertyProxy, self).delete()
class MetadefPropertyRepoProxy(glance.domain.proxy.MetadefPropertyRepo):
def __init__(self, property_repo, context, object_policy):
self.context = context
self.policy = object_policy
self.property_repo = property_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefPropertyRepoProxy, self).__init__(
property_repo,
property_proxy_class=MetadefPropertyProxy,
property_proxy_kwargs=proxy_kwargs)
def get(self, namespace, property_name):
self.policy.enforce(self.context, 'get_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).get(namespace,
property_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_properties', {})
return super(MetadefPropertyRepoProxy, self).list(
*args, **kwargs)
def save(self, namespace_property):
self.policy.enforce(self.context, 'modify_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).save(
namespace_property)
def add(self, namespace_property):
self.policy.enforce(self.context, 'add_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).add(
namespace_property)
def remove(self, *args, **kwargs):
self.policy.enforce(self.context, 'remove_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).remove(*args, **kwargs)
class MetadefPropertyFactoryProxy(glance.domain.proxy.MetadefPropertyFactory):
def __init__(self, namespace_property_factory, context, policy):
self.namespace_property_factory = namespace_property_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefPropertyFactoryProxy, self).__init__(
namespace_property_factory,
property_proxy_class=MetadefPropertyProxy,
property_proxy_kwargs=proxy_kwargs)
# Metadef Tag classes
class MetadefTagProxy(glance.domain.proxy.MetadefTag):
def __init__(self, meta_tag, context, policy):
self.context = context
self.policy = policy
super(MetadefTagProxy, self).__init__(meta_tag)
def delete(self):
self.policy.enforce(self.context, 'delete_metadef_tag', {})
return super(MetadefTagProxy, self).delete()
class MetadefTagRepoProxy(glance.domain.proxy.MetadefTagRepo):
def __init__(self, tag_repo, context, tag_policy):
self.context = context
self.policy = tag_policy
self.tag_repo = tag_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefTagRepoProxy,
self).__init__(tag_repo,
tag_proxy_class=MetadefTagProxy,
tag_proxy_kwargs=proxy_kwargs)
def get(self, namespace, tag_name):
self.policy.enforce(self.context, 'get_metadef_tag', {})
return super(MetadefTagRepoProxy, self).get(namespace, tag_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_tags', {})
return super(MetadefTagRepoProxy, self).list(*args, **kwargs)
def save(self, meta_tag):
self.policy.enforce(self.context, 'modify_metadef_tag', {})
return super(MetadefTagRepoProxy, self).save(meta_tag)
def add(self, meta_tag):
self.policy.enforce(self.context, 'add_metadef_tag', {})
return super(MetadefTagRepoProxy, self).add(meta_tag)
def add_tags(self, meta_tags):
self.policy.enforce(self.context, 'add_metadef_tags', {})
return super(MetadefTagRepoProxy, self).add_tags(meta_tags)
def remove(self, meta_tag):
self.policy.enforce(self.context, 'delete_metadef_tag', {})
return super(MetadefTagRepoProxy, self).remove(meta_tag)
class MetadefTagFactoryProxy(glance.domain.proxy.MetadefTagFactory):
def __init__(self, meta_tag_factory, context, policy):
self.meta_tag_factory = meta_tag_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefTagFactoryProxy, self).__init__(
meta_tag_factory,
meta_tag_proxy_class=MetadefTagProxy,
meta_tag_proxy_kwargs=proxy_kwargs)
|
|
#!/usr/bin/python
import sys
from copy import deepcopy
import rospy
import rospkg
import baxter_interface
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
# Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
class Calibrate(object):
"""
This class defines the calibration routine for the arm (which by default
is the left arm).
"""
def __init__(self, limb='left'):
self._rp = rospkg.RosPack()
self._config_path = self._rp.get_path('baxter_mill') + '/config/'
self._limb = limb
self._baxter_limb = baxter_interface.Limb(self._limb)
self._neutral_pos = {}
self._picking_pos = {}
self._neutral_bool = False
self._picking_bool = True
self._mill_pos = {}
self._picking_pos = {}
self.br_pos = {}
self._the_pose = Pose()
self._should_io = baxter_interface.DigitalIO(self._limb +
'_shoulder_button')
self._dash_io = baxter_interface.DigitalIO(self._limb +
'_upper_button')
self._circle_io = baxter_interface.DigitalIO(self._limb +
'_lower_button')
ik_srv = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
self._iksvc = rospy.ServiceProxy(ik_srv, SolvePositionIK)
self._ikreq = SolvePositionIKRequest()
self._circle_io.state_changed.connect(self._default_points)
self.done_calibration = False
def _find_joint_position(self, pose, x_off=0.0, y_off=0.0, z_off=0.0):
'''
Finds the joint positions of the arm for a given pose plus optional
x/y/z offsets (so callers do not have to rebuild the pose structure
themselves every time).
'''
ik_request = SolvePositionIKRequest()
the_pose = deepcopy(pose)
the_pose['position'] = Point(x=pose['position'].x + x_off,
y=pose['position'].y + y_off,
z=pose['position'].z + z_off)
approach_pose = Pose()
approach_pose.position = the_pose['position']
approach_pose.orientation = the_pose['orientation']
hdr = Header(stamp=rospy.Time.now(), frame_id='base')
pose_req = PoseStamped(header=hdr, pose=approach_pose)
ik_request.pose_stamp.append(pose_req)
resp = self._iksvc(ik_request)
return dict(zip(resp.joints[0].name, resp.joints[0].position))
def _blink_light(self, state):
"""
Sets the limb's outer ITB light Digital Output to the given state;
callers blink it on and then off around each recording.
"""
io_component=self._limb+"_itb_light_outer"
b = baxter_interface.digital_io.DigitalIO(io_component)
print "Initial state: ", b.state
b.set_output(state)
def _generate_picking_positions(self):
# now generate picking positions
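# The picking area is a 5 x 2 grid of points spaced 0.065 m apart, generated
# from the single recorded picking pose; loop index 9 is skipped and index 10
# is renamed, giving positions p1..p9, each stored as a [bottom, top] pair
# where top sits 10 cm above bottom for the approach move.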
missed_pos = []
bottom_pose = self._picking_pose
counter = 0
for y in range(5):
for x in range(2):
counter += 1
if counter == 9:
continue
if counter == 10:
counter = 9
t = "p" + str(counter)
print t
x_o = y * 0.065 * -1
y_o = 0.065 * x
bottom_pos = self._find_joint_position(
bottom_pose,
x_off=x_o,
y_off=y_o
)
top_pos = self._find_joint_position(
bottom_pose,
x_off=x_o,
y_off=y_o,
z_off=0.10
)
rospy.sleep(0.1)
self._picking_pos[t] = [bottom_pos, top_pos]
if len(self._picking_pos[t][0]) == 0:
missed_pos.append((t, "bottom"))
if len(self._picking_pos[t][1]) == 0:
missed_pos.append((t, "top"))
return missed_pos
def _default_points(self, value):
"""
Circle-button callback: registers, in turn, the picking position,
the neutral position and the (g,7) board reference position.
"""
if value:
if len(self._picking_pos) == 0 and self._limb == "left":
self._blink_light(True)
# Record default position
print 'Recording picking location'
self._picking_pose = self._baxter_limb.endpoint_pose()
self._generate_picking_positions()
self._blink_light(False)
# Registers the central neutral point. Otherwise the arm
# could move somewhere below the table while moving to the
# chessboard positions.
elif len(self._neutral_pos) == 0:
self._blink_light(True)
# Record neutral position
print 'Recording neutral location'
self._neutral_pos = self._baxter_limb.joint_angles()
self._neutral_bool = False
rospy.sleep(0.5)
self._blink_light(False)
elif len(self.br_pos) == 0:
self._blink_light(True)
print 'Recording pick location'
self.br_pos[0] = self._baxter_limb.joint_angles()
self._the_pose = self._baxter_limb.endpoint_pose()
self.br_pos[1] = self._find_joint_position(
self._the_pose,
z_off=0.10)
self._blink_light(False)
else:
print "Stop pressing! You have already calibrated!"
def _get_mill_pos(self, x, y, limb):
alph = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
acceptable = {'left': ['a1', 'a4', 'a7', 'b2', 'b4', 'b6', 'c3',
'c4', 'c5', 'd1', 'd2', 'd3', 'd5', 'd6',
'd7', 'e3', 'e4', 'e5', 'f2', 'f4', 'f6',
'g1', 'g4', 'g7']}
mill_x = alph[x]
mill = mill_x + str(y+1)
if mill in acceptable[limb]:
return mill
else:
return None
def _generate_right_positions(self):
pass
def _generate_left_positions(self):
"""
Generates positions given position g,7 has been registered.
WARNING: Make sure chessboard is parallel to robot.
Returns a list of non-generated positions (most likely
to be empty)
"""
if len(self.br_pos) != 0:
missed_pos = []
cur_bottom_pose = self._the_pose
for y in range(6, -1, -1):
for x in range(6, -1, -1):
x_o = (6 - y) * 0.065
y_o = 0.065 * (6 - x) * -1
# t = (7 - y, 7 - x) # yep
t = self._get_mill_pos(x, y, self._limb)
if not t:
continue
print t
bottom_pos = self._find_joint_position(
cur_bottom_pose,
x_off=x_o,
y_off=y_o
)
top_pos = self._find_joint_position(
cur_bottom_pose,
x_off=x_o,
y_off=y_o,
z_off=0.10
)
rospy.sleep(0.1)
self._mill_pos[t] = [bottom_pos, top_pos]
if len(self._mill_pos[t][0]) == 0:
missed_pos.append((t, "bottom"))
if len(self._mill_pos[t][1]) == 0:
missed_pos.append((t, "top"))
return missed_pos
def _save_config(self, file):
"""
Saves positions to config file.
"""
print "Saving your positions to file!"
f = open(file, 'w')
for x in range(1,10):
t = "p" + str(x)
f.write(str(t) + "=" + str(self._picking_pos[t]) + '\n')
f.write('neutral_pos=' + str(self._neutral_pos) + '\n')
for x in range(7):
for y in range(7):
t = self._get_mill_pos(x, y, self._limb)
if not t:
continue
f.write(str(t) + "=" + str(self._mill_pos[t]) + '\n')
f.close()
def get_locations(self):
"""
Main function of the class. Runs the calibrate procedure.
"""
self.done_calibration = False
while not self.done_calibration:
self.read_file = raw_input("Are you sure you really want to"
" overwrite your previous changes"
"(y/n)? ")
if self.read_file != 'y' and self.read_file != 'n':
print "You must answer 'y' or 'n'"
elif self.read_file == 'n':
print "Alright then: using previous values."
return
else:
if self._limb == "right":
self.calibrate_right()
else:
self.calibrate_left()
def calibrate_right(self):
pass
def calibrate_left(self):
print ("Move the " + self._limb + " arm to p1 (bottom-left) "
"position and "
"press the circle button ")
while(len(self._picking_pos) == 0 and not rospy.is_shutdown()):
rospy.sleep(0.1)
print ("Default gripping position - Registered.")
print ("Move the " + self._limb + " arm to the neutral "
"position and press the circle button ")
while(len(self._neutral_pos) == 0 and not rospy.is_shutdown()):
rospy.sleep(0.1)
print ("Neutral gripping position - Registered.")
print ("Move same arm to (g,7) position and press the"
"circle button")
while(len(self.br_pos) == 0 and not rospy.is_shutdown()):
rospy.sleep(0.1)
print "Well done!"
print "Starting generating positions"
missed = self._generate_left_positions()
print "Done generating positions"
if len(missed) != 0:
print "The IK generator has missed the following positions"
print missed
print "You will now repeat the calibration. Try again :)"
self.__init__("left") # hack
else:
print "Saving your new configuration!"
self._save_config(self._config_path + "left_positions.config")
self.done_calibration = True
def test(self):
if self._limb == "right":
self.test_right()
else:
self.test_left()
def test_move(self, pos):
self._baxter_limb.move_to_joint_positions(self._neutral_pos)
self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][1])
self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][0])
self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][1])
def test_pick(self, pos):
self._baxter_limb.move_to_joint_positions(self._neutral_pos)
self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][1])
self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][0])
self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][1])
def test_right(self):
pass
def test_left(self):
"""
Tests the four corners.
"""
if not self.done_calibration:
print "Calibrate the positions first!"
return -1
print "TESTING!"
self.test_move("a1")
self.test_move("a7")
self.test_move("g7")
self.test_move("g1")
self.test_pick("p1")
self.test_pick("p9")
self._baxter_limb.move_to_joint_positions(self._neutral_pos)
def main():
rospy.init_node("baxter_mill_calibrate")
rs = baxter_interface.RobotEnable()
rs.enable()
left = Calibrate("left")
left.get_locations()
left.test()
if __name__ == "__main__":
sys.exit(main())
|
|
""" An implementation of the paper "A Neural Algorithm of Artistic Style"
by Gatys et al. in TensorFlow.
Author: Chip Huyen (huyenn@stanford.edu)
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import vgg_model
import utils
# parameters to manage experiments
STYLE = 'guernica'
CONTENT = 'deadpool'
STYLE_IMAGE = 'styles/' + STYLE + '.jpg'
CONTENT_IMAGE = 'content/' + CONTENT + '.jpg'
IMAGE_HEIGHT = 250
IMAGE_WIDTH = 333
NOISE_RATIO = 0.6 # percentage of weight of the noise for intermixing with the content image
CONTENT_WEIGHT = 0.01
STYLE_WEIGHT = 1
# Layers used for style features. You can change this.
STYLE_LAYERS = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
W = [0.5, 1.0, 1.5, 3.0, 4.0] # give more weight to deeper layers.
# Layer used for content features. You can change this.
CONTENT_LAYER = 'conv4_2'
ITERS = 300
LR = 2.0
MEAN_PIXELS = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
""" MEAN_PIXELS is defined according to description on their github:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8
'In the paper, the model is denoted as the configuration D trained with scale jittering.
The input images should be zero-centered by mean pixel (rather than mean image) subtraction.
Namely, the following BGR values should be subtracted: [103.939, 116.779, 123.68].'
"""
# VGG-19 parameters file
VGG_DOWNLOAD_LINK = 'http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat'
VGG_MODEL = 'imagenet-vgg-verydeep-19.mat'
EXPECTED_BYTES = 534904783
def _create_content_loss(p, f):
""" Calculate the loss between the feature representation of the
content image and the generated image.
Inputs:
p, f are just P, F in the paper
(read the assignment handout if you're confused)
Note: we won't use the coefficient 0.5 as defined in the paper
but the coefficient as defined in the assignment handout.
Output:
the content loss
"""
return tf.reduce_sum((f - p) ** 2) / (4.0 * p.size)
def _gram_matrix(F, N, M):
""" Create and return the gram matrix for tensor F
Hint: you'll first have to reshape F
"""
F = tf.reshape(F, (M, N))
return tf.matmul(tf.transpose(F), F)
def _single_style_loss(a, g):
""" Calculate the style loss at a certain layer
Inputs:
a is the feature representation of the real image
g is the feature representation of the generated image
Output:
the style loss at a certain layer (which is E_l in the paper)
Hint: 1. you'll have to use the function _gram_matrix()
2. we'll use the same coefficient for style loss as in the paper
3. a and g are feature representation, not gram matrices
"""
N = a.shape[3] # number of filters
M = a.shape[1] * a.shape[2] # height times width of the feature map
A = _gram_matrix(a, N, M)
G = _gram_matrix(g, N, M)
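# Per-layer style loss: sum((G - A)^2) / (4 * N^2 * M^2); note that
# (2 * N * M) ** 2 below equals 4 * N^2 * M^2.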
return tf.reduce_sum((G - A) ** 2 / ((2 * N * M) ** 2))
def _create_style_loss(A, model):
""" Return the total style loss
"""
n_layers = len(STYLE_LAYERS)
E = [_single_style_loss(A[i], model[STYLE_LAYERS[i]]) for i in range(n_layers)]
###############################
## TO DO: return total style loss
return sum([W[i] * E[i] for i in range(n_layers)])
###############################
def _create_losses(model, input_image, content_image, style_image):
with tf.variable_scope('loss') as scope:
with tf.Session() as sess:
sess.run(input_image.assign(content_image)) # assign content image to the input variable
p = sess.run(model[CONTENT_LAYER])
content_loss = _create_content_loss(p, model[CONTENT_LAYER])
with tf.Session() as sess:
sess.run(input_image.assign(style_image))
A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS])
style_loss = _create_style_loss(A, model)
##########################################
## TO DO: create total loss.
## Hint: don't forget the content loss and style loss weights
total_loss = CONTENT_WEIGHT * content_loss + STYLE_WEIGHT * style_loss
##########################################
return content_loss, style_loss, total_loss
def _create_summary(model):
""" Create summary ops necessary
Hint: don't forget to merge them
"""
with tf.name_scope('summaries'):
tf.summary.scalar('content loss', model['content_loss'])
tf.summary.scalar('style loss', model['style_loss'])
tf.summary.scalar('total loss', model['total_loss'])
tf.summary.histogram('histogram content loss', model['content_loss'])
tf.summary.histogram('histogram style loss', model['style_loss'])
tf.summary.histogram('histogram total loss', model['total_loss'])
return tf.summary.merge_all()
def train(model, generated_image, initial_image):
""" Train your model.
Don't forget to create folders for checkpoints and outputs.
"""
skip_step = 1
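# skip_step controls how often the generated image and losses are reported:
# every step at first, every 10 steps after step 5, every 20 after step 20.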
with tf.Session() as sess:
saver = tf.train.Saver()
###############################
## TO DO:
## 1. initialize your variables
## 2. create writer to write your graph
sess.run(tf.global_variables_initializer())
# write the graph for TensorBoard (the 'graphs/style_transfer' directory name is an arbitrary choice)
writer = tf.summary.FileWriter('graphs/style_transfer', sess.graph)
###############################
sess.run(generated_image.assign(initial_image))
ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
initial_step = model['global_step'].eval()
start_time = time.time()
for index in range(initial_step, ITERS):
if index >= 5 and index < 20:
skip_step = 10
elif index >= 20:
skip_step = 20
sess.run(model['optimizer'])
if (index + 1) % skip_step == 0:
###############################
## TO DO: obtain generated image and loss
gen_image, total_loss, summary = sess.run([generated_image, model['total_loss'],
model['summary_op']])
###############################
gen_image = gen_image + MEAN_PIXELS
writer.add_summary(summary, global_step=index)
print('Step {}\n Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))
print(' Loss: {:5.1f}'.format(total_loss))
print(' Time: {}'.format(time.time() - start_time))
start_time = time.time()
filename = 'outputs/%d.png' % (index)
utils.save_image(filename, gen_image)
if (index + 1) % 20 == 0:
saver.save(sess, 'checkpoints/style_transfer', index)
def main():
with tf.variable_scope('input') as scope:
# use variable instead of placeholder because we're training the initial image to make it
# look like both the content image and the style image
input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
model = vgg_model.load_vgg(VGG_MODEL, input_image)
model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
content_image = content_image - MEAN_PIXELS
style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
style_image = style_image - MEAN_PIXELS
model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model,
input_image, content_image, style_image)
###############################
## TO DO: create optimizer
model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'],
global_step=model['global_step'])
###############################
model['summary_op'] = _create_summary(model)
initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
train(model, input_image, initial_image)
if __name__ == '__main__':
main()
|
|
from __future__ import absolute_import, division
from desispec.io import read_frame
from desispec.io import read_fiberflat
from desispec.io import read_sky
from desispec.io import shorten_filename
from desispec.io.fluxcalibration import read_stdstar_models
from desispec.io.fluxcalibration import write_flux_calibration
from desispec.fiberflat import apply_fiberflat
from desispec.sky import subtract_sky
from desispec.fluxcalibration import compute_flux_calibration, isStdStar
from desiutil.log import get_logger
from desitarget.targets import main_cmx_or_sv
from desispec.fiberbitmasking import get_fiberbitmasked_frame
import argparse
import os
import os.path
import numpy as np
import sys
from astropy.table import Table
def parse(options=None):
parser = argparse.ArgumentParser(description="Compute the flux calibration for a DESI frame using precomputed spectro-photometrically calibrated stellar models.")
parser.add_argument('--infile', type = str, default = None, required=True,
help = 'path of DESI exposure frame fits file')
parser.add_argument('--fiberflat', type = str, default = None, required=True,
help = 'path of DESI fiberflat fits file')
parser.add_argument('--sky', type = str, default = None, required=True,
help = 'path of DESI sky fits file')
parser.add_argument('--models', type = str, default = None, required=True,
help = 'path of spectro-photometric stellar spectra fits file')
parser.add_argument('--selected-calibration-stars', type = str, default = None, required=False,
help = 'path to table with list of pre-selected calibration stars')
parser.add_argument('--chi2cut', type = float, default = 0., required=False,
help = 'apply a reduced chi2 cut for the selection of stars')
parser.add_argument('--chi2cut-nsig', type = float, default = 0., required=False,
help = 'discard n-sigma outliers from the reduced chi2 of the standard star fit')
parser.add_argument('--color', type = str, default = None, required=False,
help = 'color used for filtering. Can be G-R, R-Z, GAIA-BP-RP or GAIA-G-RP')
parser.add_argument('--min-color', type = float, default = None, required=False,
help = 'only consider stars with color greater than this')
parser.add_argument('--delta-color-cut', type = float, default = 0.2, required=False,
help = 'discard model stars with different broad-band color from imaging')
parser.add_argument('--nostdcheck', dest='nostdcheck',
help='Do not check the standards against flags in the FIBERMAP; just use objects in the model file', action='store_true')
parser.add_argument('--outfile', type = str, default = None, required=True,
help = 'path of DESI flux calibration fits file')
parser.add_argument('--qafile', type=str, default=None, required=False,
help='path of QA file.')
parser.add_argument('--qafig', type = str, default = None, required=False,
help = 'path of QA figure file')
parser.add_argument('--highest-throughput', type = int, default = 0, required=False,
help = 'use this number of stars ranked by highest throughput to normalize transmission (for DESI commissioning)')
parser.add_argument('--seeing-fwhm', type = float, default = 1.1, required=False,
help = 'seeing FWHM in arcsec, used for fiberloss correction')
parser.add_argument('--nsig-flux-scale', type = float, default = 3, required=False,
help = 'n sigma cutoff of the flux scale among standard stars')
parser.set_defaults(nostdcheck=False)
args = None
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
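# A minimal invocation sketch (the file names below are illustrative only):
#   desi_compute_fluxcalibration --infile frame.fits --fiberflat fiberflat.fits \
#       --sky sky.fits --models stdstars.fits --outfile fluxcalib.fits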
def main(args) :
log=get_logger()
cmd = ['desi_compute_fluxcalibration',]
for key, value in args.__dict__.items():
if value is not None:
cmd += ['--'+key, str(value)]
cmd = ' '.join(cmd)
log.info(cmd)
log.info("read frame")
# read frame
frame = read_frame(args.infile)
# Set fibermask flagged spectra to have 0 flux and variance
frame = get_fiberbitmasked_frame(frame, bitmask='flux',ivar_framemask=True)
log.info("apply fiberflat")
# read fiberflat
fiberflat = read_fiberflat(args.fiberflat)
# apply fiberflat
apply_fiberflat(frame, fiberflat)
log.info("subtract sky")
# read sky
skymodel=read_sky(args.sky)
# subtract sky
subtract_sky(frame, skymodel)
log.info("compute flux calibration")
# read models
model_flux, model_wave, model_fibers, model_metadata=read_stdstar_models(args.models)
if args.selected_calibration_stars is not None :
table=Table.read(args.selected_calibration_stars)
good=table["VALID"]==1
good_models = np.in1d( model_fibers , table["FIBER"][good] )
log.info("Selected {} good stars, fibers = {}, from {}".format(np.sum(good_models),model_fibers[good_models],args.selected_calibration_stars))
model_flux = model_flux[good_models]
model_fibers = model_fibers[good_models]
model_metadata = model_metadata[good_models]
if args.delta_color_cut > 0 :
log.warning("will ignore color cut because a preselected list of stars was given")
args.delta_color_cut = 0
if args.min_color is not None :
log.warning("will ignore min color because a preselected list of stars was given")
args.min_color = None
if args.chi2cut_nsig > 0 :
log.warning("will ignore chi2 cut because a preselected list of stars was given")
args.chi2cut_nsig = 0
if args.nsig_flux_scale > 0 :
log.warning("set nsig_flux_scale because a preselected list of stars was given")
args.nsig_flux_scale = 0.
ok=np.ones(len(model_metadata),dtype=bool)
if args.chi2cut > 0 :
log.info("apply cut CHI2DOF<{}".format(args.chi2cut))
good = (model_metadata["CHI2DOF"]<args.chi2cut)
bad = ~good
ok &= good
if np.any(bad) :
log.info(" discard {} stars with CHI2DOF= {}".format(np.sum(bad),list(model_metadata["CHI2DOF"][bad])))
legacy_filters = ('G-R', 'R-Z')
gaia_filters = ('GAIA-BP-RP', 'GAIA-G-RP')
model_column_list = model_metadata.columns.names
if args.color is None:
if 'MODEL_G-R' in model_column_list:
color = 'G-R'
elif 'MODEL_GAIA-BP-RP' in model_column_list:
log.info('Using Gaia filters')
color ='GAIA-BP-RP'
else:
log.error("Can't find either G-R or BP-RP color in the model file.")
sys.exit(15)
else:
if args.color not in legacy_filters and args.color not in gaia_filters:
log.error('Color name {} is not allowed, must be one of {} {}'.format(args.color, legacy_filters,gaia_filters))
sys.exit(14)
color = args.color
if color not in model_column_list:
# This shouldn't happen
log.error('The color {} was not computed in the models'.format(color))
sys.exit(16)
if args.delta_color_cut > 0 :
# check dust extinction values for those stars
stars_ebv = np.array(frame.fibermap[model_fibers % 500]["EBV"])
log.info("min max E(B-V) for std stars = {:4.3f} {:4.3f}".format(np.min(stars_ebv),np.max(stars_ebv)))
star_gr_reddening_relative_error = 0.2 * stars_ebv
log.info("Consider a g-r reddening sys. error in the range {:4.3f} {:4.3f}".format(np.min(star_gr_reddening_relative_error),np.max(star_gr_reddening_relative_error)))
log.info("apply cut |delta color|<{}+reddening error".format(args.delta_color_cut))
good = (np.abs(model_metadata["MODEL_"+color]-model_metadata["DATA_"+color])<args.delta_color_cut+star_gr_reddening_relative_error)
bad = ok&(~good)
ok &= good
if np.any(bad) :
vals=model_metadata["MODEL_"+color][bad]-model_metadata["DATA_"+color][bad]
log.info(" discard {} stars with dcolor= {}".format(np.sum(bad),list(vals)))
if args.min_color is not None :
log.info("apply cut DATA_{}>{}".format(color, args.min_color))
good = (model_metadata["DATA_{}".format(color)]>args.min_color)
bad = ok&(~good)
ok &= good
if np.any(bad) :
vals=model_metadata["DATA_{}".format(color)][bad]
log.info(" discard {} stars with {}= {}".format(np.sum(bad),color,list(vals)))
if args.chi2cut_nsig > 0 :
# automatically reject stars that are chi2 outliers
mchi2=np.median(model_metadata["CHI2DOF"])
rmschi2=np.std(model_metadata["CHI2DOF"])
maxchi2=mchi2+args.chi2cut_nsig*rmschi2
log.info("apply cut CHI2DOF<{} based on chi2cut_nsig={}".format(maxchi2,args.chi2cut_nsig))
good = (model_metadata["CHI2DOF"]<=maxchi2)
bad = ok&(~good)
ok &= good
if np.any(bad) :
log.info(" discard {} stars with CHI2DOF={}".format(np.sum(bad),list(model_metadata["CHI2DOF"][bad])))
ok=np.where(ok)[0]
if ok.size == 0 :
log.error("selection cuts discarded all stars")
sys.exit(12)
nstars=model_flux.shape[0]
nbad=nstars-ok.size
if nbad>0 :
log.warning("discarding %d star(s) out of %d because of cuts"%(nbad,nstars))
model_flux=model_flux[ok]
model_fibers=model_fibers[ok]
    model_metadata=model_metadata[ok]
stdcheck = not args.nostdcheck
# check that the model_fibers are actually standard stars
fibermap = frame.fibermap
## check whether star fibers from args.models are consistent with fibers from fibermap
## if not print the OBJTYPE from fibermap for the fibers numbers in args.models and exit
if stdcheck:
fibermap_std_indices = np.where(isStdStar(fibermap))[0]
if np.any(~np.in1d(model_fibers%500, fibermap_std_indices)):
target_colnames, target_masks, survey = main_cmx_or_sv(fibermap)
colname = target_colnames[0]
for i in model_fibers%500:
log.error("inconsistency with spectrum {}, OBJTYPE={}, {}={} in fibermap".format(
i, fibermap["OBJTYPE"][i], colname, fibermap[colname][i]))
sys.exit(12)
else:
fibermap_std_indices = model_fibers % 500
# Make sure the fibers of interest aren't entirely masked.
if np.sum(np.sum(frame.ivar[model_fibers%500, :] == 0, axis=1) == frame.nwave) == len(model_fibers):
log.warning('All standard-star spectra are masked!')
return
fluxcalib = compute_flux_calibration(frame, model_wave, model_flux,
model_fibers%500,
highest_throughput_nstars=args.highest_throughput,
exposure_seeing_fwhm=args.seeing_fwhm,
stdcheck=stdcheck, nsig_flux_scale= args.nsig_flux_scale)
# QA
if (args.qafile is not None):
from desispec.io import write_qa_frame
from desispec.io.qa import load_qa_frame
from desispec.qa import qa_plots
log.info("performing fluxcalib QA")
# Load
qaframe = load_qa_frame(args.qafile, frame_meta=frame.meta, flavor=frame.meta['FLAVOR'])
# Run
#import pdb; pdb.set_trace()
qaframe.run_qa('FLUXCALIB', (frame, fluxcalib))
# Write
if args.qafile is not None:
write_qa_frame(args.qafile, qaframe)
log.info("successfully wrote {:s}".format(args.qafile))
# Figure(s)
if args.qafig is not None:
qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib)
# record inputs
frame.meta['IN_FRAME'] = shorten_filename(args.infile)
frame.meta['IN_SKY'] = shorten_filename(args.sky)
frame.meta['FIBERFLT'] = shorten_filename(args.fiberflat)
frame.meta['STDMODEL'] = shorten_filename(args.models)
# write result
write_flux_calibration(args.outfile, fluxcalib, header=frame.meta)
log.info("successfully wrote %s"%args.outfile)
|
|
# Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
from gevent import spawn_later
from gevent.event import Event
import requests
from tellapart.aurproxy.audit import AuditItem
from tellapart.aurproxy.share.adjuster import ShareAdjuster
from tellapart.aurproxy.util import get_logger
logger = get_logger(__name__)
class HttpHealthCheckLogEvent(object):
"""
Events that can be logged during the course of HttpHealthCheck execution.
"""
COMPLETED_CHECK = 'completed_check'
RUNNING_CALLBACK = 'running_callback'
RUNNING_CHECK = 'running_check'
STARTED_CHECKER = 'started_checker'
STARTING_CHECK = 'starting_check'
STOPPED_CHECKER = 'stopped_checker'
UPDATED_HEALTH_STATUS = 'updated_health_status'
class HttpHealthCheckLogResult(object):
"""
Action results that can be logged during the course of HttpHealthCheck
execution.
"""
ERROR = 'error'
FAILURE = 'failure'
SUCCESS = 'success'
TIMEOUT = 'timeout'
class HealthCheckResult(object):
"""
Possible results of individual check runs.
"""
ERROR_CODE = 'error_code'
KNOWN_LOCAL_ERROR = 'known_local_error'
KNOWN_REMOTE_ERROR = 'known_remote_error'
SUCCESS = 'success'
TIMEOUT = 'timeout'
UNKNOWN_ERROR = 'unknown_error'
HEALTHY_RESULTS = [ HealthCheckResult.SUCCESS ]
UNCHANGED_RESULTS = [ HealthCheckResult.KNOWN_LOCAL_ERROR ]
UNHEALTHY_RESULTS = [ HealthCheckResult.ERROR_CODE,
HealthCheckResult.KNOWN_REMOTE_ERROR,
HealthCheckResult.TIMEOUT,
HealthCheckResult.UNKNOWN_ERROR ]
SUPPORTED_HEALTHCHECK_METHODS = ('GET', 'HEAD')
class HealthCheckStatus(object):
"""
Possible statuses of ongoing check instances.
"""
HEALTHY = 'healthy'
INITIALIZING = 'initializing'
UNHEALTHY = 'unhealthy'
HEALTHY_STATUSES = [ HealthCheckStatus.HEALTHY ]
class HttpHealthCheckShareAdjuster(ShareAdjuster):
def __init__(self, endpoint, signal_update_fn, route='/health', interval=5,
timeout=3.0, unhealthy_threshold=2, healthy_threshold=2,
port_name=None, http_method='GET'):
"""
A basic http health check implementation. Parameters match those available
    on an Elastic Load Balancer.
    Checks for a 200 response code.
Args:
endpoint - Endpoint to check.
signal_update_fn - function - function to call on status update.
route - str - http route to check.
interval - int - seconds between checks.
timeout - float - seconds before a check attempt times out.
unhealthy_threshold - int - failures before endpoint marked unhealthy.
healthy_threshold - int - successes before endpoint marked healthy.
port_name - str - Optional name of port to check. EG: 'health'.
http_method - str - Optional uppercase name of the http verb. EG: GET or HEAD
"""
super(HttpHealthCheckShareAdjuster, self).__init__(endpoint,
signal_update_fn)
self._route = route
self._interval = int(interval)
self._timeout = float(timeout)
self._unhealthy_threshold = int(unhealthy_threshold)
self._healthy_threshold = int(healthy_threshold)
self._port_name = port_name
max_result_len = self._healthy_threshold + self._unhealthy_threshold
self._check_results = collections.deque(maxlen=max_result_len)
self._status = HealthCheckStatus.INITIALIZING
self._stop_event = Event()
if http_method.upper() not in SUPPORTED_HEALTHCHECK_METHODS:
raise Exception('http_method only supports: {}'.format(
', '.join(SUPPORTED_HEALTHCHECK_METHODS),
))
self._http_method = http_method.lower()
@property
def status(self):
"""
Get current status of endpoint.
Returns:
A HealthCheckStatus value.
"""
return self._status
def start(self):
"""
Start running healthchecks against endpoint.
"""
spawn_later(self._interval, self._check)
self._record(HttpHealthCheckLogEvent.STARTED_CHECKER,
HttpHealthCheckLogResult.SUCCESS)
def stop(self):
"""
Stop running healthchecks against endpoint.
"""
self._stop_event.set()
self._record(HttpHealthCheckLogEvent.STOPPED_CHECKER,
HttpHealthCheckLogResult.SUCCESS)
@property
def auditable_share(self):
"""Return current share adjustment factor.
"""
if self.status in HEALTHY_STATUSES:
return 1.0, AuditItem('health', '1.0')
else:
return 0.0, AuditItem('health', '0.0')
def _build_check_uri(self):
"""
Builds the URI to check.
Returns:
Check URI string.
"""
uri_template = 'http://{0}:{1}{2}'
if self._port_name:
port = self._endpoint.context['port_map'][self._port_name]
else:
port = self._endpoint.port
return uri_template.format(self._endpoint.host,
port,
self._route)
  def _check(self):
    """
    Run healthcheck, then schedule the next check unless stopped.
    """
if self._stop_event.is_set():
return
check_uri = self._build_check_uri()
error_log_fn = None
try:
self._record(HttpHealthCheckLogEvent.STARTING_CHECK,
HttpHealthCheckLogResult.SUCCESS, log_fn=logger.debug)
r = getattr(requests, self._http_method)(check_uri, timeout=self._timeout)
if r.status_code == requests.codes.ok:
check_result = HealthCheckResult.SUCCESS
self._record(HttpHealthCheckLogEvent.RUNNING_CHECK,
HttpHealthCheckLogResult.SUCCESS, log_fn=logger.debug)
else:
check_result = HealthCheckResult.ERROR_CODE
self._record(HttpHealthCheckLogEvent.RUNNING_CHECK,
HttpHealthCheckLogResult.FAILURE,
'status_code:{0}'.format(r.status_code))
except requests.exceptions.Timeout:
check_result = HealthCheckResult.TIMEOUT
self._record(HttpHealthCheckLogEvent.RUNNING_CHECK,
HttpHealthCheckLogResult.TIMEOUT)
except requests.exceptions.ConnectionError as ex:
if 'gaierror' in unicode(ex):
check_result = HealthCheckResult.KNOWN_LOCAL_ERROR
error_log_fn = logger.error
elif 'connection refused' in unicode(ex).lower():
check_result = HealthCheckResult.KNOWN_REMOTE_ERROR
error_log_fn = logger.error
else:
check_result = HealthCheckResult.UNKNOWN_ERROR
error_log_fn = logger.exception
except Exception:
check_result = HealthCheckResult.UNKNOWN_ERROR
error_log_fn = logger.exception
if error_log_fn:
error_log_fn('Exception when executing HttpHealthCheck.')
self._record(HttpHealthCheckLogEvent.RUNNING_CHECK,
check_result)
self._update_status(check_result)
spawn_later(self._interval, self._check)
def _record(self, event, result, msg='', log_fn=logger.info):
"""
Utility to record HttpHealthCheck events and results.
Args:
event - HttpHealthCheckLogEvent.
result - HttpHealthCheckLogResult.
msg - str - Extra message.
log_fn - function - logger function to use.
"""
f = 'event:%(event)s result:%(result)s check_uri:%(check_uri)s msg:%(msg)s'
context = { 'event': event,
'result': result,
'check_uri': self._build_check_uri(),
'msg': msg }
log_fn(f, context)
def _update_status(self, check_result):
"""
If necessary based on configuration, update status of this check.
Calls self._callback if set.
Args:
check_result - HttpCheckResult
"""
if check_result in UNCHANGED_RESULTS:
return
self._check_results.append(check_result)
check_results = copy.copy(self._check_results)
calculated_status = self._status
healthy_lookback = list(check_results)[-self._healthy_threshold:]
if len(healthy_lookback) == self._healthy_threshold and \
all([cr in HEALTHY_RESULTS for cr in healthy_lookback]):
calculated_status = HealthCheckStatus.HEALTHY
unhealthy_lookback = list(check_results)[-self._unhealthy_threshold:]
if len(unhealthy_lookback) == self._unhealthy_threshold and \
all([cr in UNHEALTHY_RESULTS for cr in unhealthy_lookback]):
calculated_status = HealthCheckStatus.UNHEALTHY
if self._status != calculated_status:
old_status = self._status
self._status = calculated_status
self._record(HttpHealthCheckLogEvent.UPDATED_HEALTH_STATUS,
HttpHealthCheckLogResult.SUCCESS,
'{0} -> {1}'.format(old_status, calculated_status))
if self._signal_update_fn:
try:
          # Execute callback to signal the status change
self._signal_update_fn()
except Exception:
          logger.exception('Exception when executing callback on '
                           'HttpHealthCheckShareAdjuster status change.')
self._record(HttpHealthCheckLogEvent.RUNNING_CALLBACK,
HttpHealthCheckLogResult.ERROR)
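# Minimal usage sketch (illustrative; `endpoint` and `on_update` are
# hypothetical stand-ins for objects supplied by the aurproxy runtime):
#
#     adjuster = HttpHealthCheckShareAdjuster(endpoint, on_update,
#                                             route='/health', interval=5,
#                                             timeout=3.0)
#     adjuster.start()
#     share, audit_item = adjuster.auditable_share  # (1.0, ...) once healthy
#     adjuster.stop()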
|
|
import os
import string
from itertools import chain
from django.conf import settings
from django.contrib.auth.hashers import mask_hash
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db.models import FloatField, CharField, SlugField, PositiveIntegerField
from django.forms import widgets
from django.utils.crypto import get_random_string
from django.utils.functional import curry
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from pint import UnitRegistry
from pytz import common_timezones
from django_tricks.utils.luhncode import LuhnCodeGenerator
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TIMEZONE_CHOICES = [(timezone, timezone) for timezone in common_timezones]
ureg = UnitRegistry(system='mks')
class UnitField(FloatField):
def __init__(self, *args, **kwargs):
unit = kwargs.pop('unit', None)
if unit is None:
raise ValueError('Missing unit definition.')
self.unit = ureg(unit)
self.symbol = ureg.get_symbol(unit)
super(UnitField, self).__init__(*args, **kwargs)
if not self.help_text:
self.help_text = _('Value in {}').format(self.symbol)
def deconstruct(self):
name, path, args, kwargs = super(UnitField, self).deconstruct()
kwargs['unit'] = self.symbol
return name, path, args, kwargs
def to_python(self, value):
if value:
return value * self.unit
return value
def formfield(self, **defaults):
attrs = {'type': 'number',
'step': 'any'}
min_values = [validator.limit_value for validator in self.validators if
isinstance(validator, MinValueValidator)]
if min_values:
attrs['min'] = min(min_values)
if not self.blank:
attrs['required'] = 'required'
defaults.update({'widget': widgets.NumberInput(attrs=attrs)})
return super(UnitField, self).formfield(**defaults)
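# Minimal usage sketch (illustrative; `Pipe` is a hypothetical model):
#
#     class Pipe(models.Model):
#         length = UnitField(unit='meter')
#
#     # to_python attaches the unit, yielding a pint Quantity:
#     Pipe._meta.get_field('length').to_python(2.5)  # <Quantity(2.5, 'meter')>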
class TimezoneChoiceField(CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 200)
kwargs['choices'] = TIMEZONE_CHOICES
super().__init__(*args, **kwargs)
class FlagsCheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
def __init__(self, *args, **kwargs):
self.queryset = kwargs.pop('queryset')
super(FlagsCheckboxSelectMultiple, self).__init__(*args, **kwargs)
def value_from_datadict(self, data, files, name):
value = super(FlagsCheckboxSelectMultiple, self).value_from_datadict(data, files, name)
add_field_name = '%s_add' % name
add_flags = data.get(add_field_name, None)
if add_flags:
value = value + add_flags.split(',')
value = ', '.join(set(value))
return value
def append_choices(self, field_name, choices):
flags = self.queryset.values_list(field_name, flat=True)
        flags = filter(lambda f: f is not None, flags)  # Drop None values
flags = chain(*flags) # Concatenate lists
flags = set(flags) # Remove duplicates
values = [value for value, label in choices]
flags = filter(lambda flag: flag not in values, flags)
return list(chain(choices, [(value, value.title()) for value in flags]))
def render(self, name, value, attrs=None, choices=()):
self.choices = self.append_choices(name, self.choices)
output = super(FlagsCheckboxSelectMultiple, self).render(name, value, attrs, choices)
output += format_html('<ul><li><label for="id_add_flag">Add a new flag(s)</label><br>'
'<input id="id_{0}_add" name="{0}_add" type="text" '
'class="vTextField" maxlength="140" '
'placeholder="A single flag, or many separated by commas \",\"">'
'</li></ul>'.format(name))
return mark_safe(output)
class SimpleFlagField(SimpleArrayField):
def to_python(self, value):
if isinstance(value, (list, tuple)):
value = ', '.join(value)
return super().to_python(value)
def prepare_value(self, value):
value = super(SimpleFlagField, self).prepare_value(value)
if value:
return value.split(',')
return value
class FlagsField(ArrayField):
def __init__(self, flags, size=None, **kwargs):
base_field = SlugField(max_length=10, blank=True)
self.flags = flags
super(FlagsField, self).__init__(base_field=base_field, size=size, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': SimpleFlagField,
'widget': FlagsCheckboxSelectMultiple(
choices=self.flags,
queryset=self.model._default_manager.filter())}
defaults.update(kwargs)
return super().formfield(**defaults)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['size']
del kwargs['base_field']
kwargs['flags'] = self.flags
return name, path, args, kwargs
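# Minimal usage sketch (illustrative; `Article` is a hypothetical model):
#
#     class Article(models.Model):
#         flags = FlagsField(flags=[('draft', 'Draft'), ('hot', 'Hot')],
#                            blank=True, default=list)
#
# The checkbox widget then offers the declared flags plus any flags already
# stored on other rows, with a free-text input for adding new ones.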
class NumberField(PositiveIntegerField):
def formfield(self, **defaults):
attrs = {'type': 'number', 'step': '1'}
if not self.blank:
attrs['required'] = 'required'
min_values = [validator.limit_value for validator in self.validators if
isinstance(validator, MinValueValidator)]
max_values = [validator.limit_value for validator in self.validators if
isinstance(validator, MaxValueValidator)]
if min_values:
attrs['min'] = min(min_values)
if max_values:
attrs['max'] = max(max_values)
defaults.update({'widget': widgets.NumberInput(attrs=attrs)})
return super(NumberField, self).formfield(**defaults)
class PositiveNumberField(PositiveIntegerField):
default_validators = [MinValueValidator(0)]
class UppercaseCharField(CharField):
def from_db_value(self, value, expression, connection, context):
if isinstance(value, str):
return value.upper()
else:
return value
class DefaultRandomCharField(CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('max_length', 100)
self.length = kwargs.pop('length', None)
if self.length is None:
            raise ValueError('Missing length of the random string.')
super().__init__(*args, **kwargs)
if self.length > self.max_length:
            raise ValueError('Random string length cannot exceed the field max_length.')
def deconstruct(self):
name, path, args, kwargs = super(DefaultRandomCharField, self).deconstruct()
kwargs['length'] = self.length
return name, path, args, kwargs
def get_random_option(self):
return get_random_string(
allowed_chars=string.ascii_uppercase + string.ascii_lowercase + string.digits,
length=self.length)
def pre_save(self, model_instance, add):
if not add and getattr(model_instance, self.attname) != '':
return getattr(model_instance, self.attname)
while True:
value = self.get_random_option()
            if model_instance.__class__._default_manager.filter(**{self.name: value}).exists():
continue
else:
return value
class LuhnCodeRandomField(DefaultRandomCharField):
def get_random_option(self):
generator = LuhnCodeGenerator()
return generator.encode(settings.SECRET_KEY, parts=self.length)
def get_FIELD_mask(self, field):
value = getattr(self, field.attname)
return mask_hash(value)
def contribute_to_class(self, cls, name, virtual_only=False):
super(LuhnCodeRandomField, self).contribute_to_class(cls, name, virtual_only=virtual_only)
        setattr(cls, 'get_%s_mask' % name, curry(self.get_FIELD_mask, field=self))
|
|
import colorsys
import itertools
import json
import zlib
from collections import defaultdict, namedtuple
from operator import attrgetter
from django.db import transaction
from django.db.models import Sum, Min, Max
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.csrf import csrf_exempt
from tracebin_server.utils import JSONResponse
from .models import (Log, RuntimeEnviroment, PythonTrace, TraceSection,
ResOpChunk, PythonChunk, Call)
def trace_overview(request, id):
log = get_object_or_404(Log, id=id)
return render(request, "traces/trace/overview.html", {
"page": "overview",
"log": log,
})
@csrf_exempt
@transaction.commit_on_success
def trace_upload(request):
if request.method == "GET":
return render(request, "traces/trace/new.html")
assert request.method == "POST"
assert request.META["CONTENT_TYPE"] == "application/json"
encoding = request.META.get("CONTENT_ENCODING")
if encoding == "gzip":
raw_data = zlib.decompress(request.raw_post_data)
elif encoding is None:
raw_data = request.raw_post_data
else:
raise NotImplementedError(encoding)
data = json.loads(raw_data)
# They're all public=True until we have authentication for the client.
log = Log.objects.create(
public=True,
command=data.get("command", u""),
stdout=data.get("stdout", u""),
stderr=data.get("stderr", u""),
runtime=data.get("runtime"),
)
for key, value in data.get("options", {}).iteritems():
if key == "jit":
kind = RuntimeEnviroment.JIT_OPTION
elif key == "gc":
kind = RuntimeEnviroment.GC_OPTION
elif key == "build":
kind = RuntimeEnviroment.BUILD_OPTION
        for opt_key, opt_value in value.iteritems():
            log.enviroment_options.create(kind=kind, key=opt_key, value=opt_value)
for trace in data.get("traces", []):
kwargs = {"log": log}
if trace["type"] == "python":
kwargs["root_file"] = trace["root_file"]
kwargs["root_function"] = trace["root_function"]
cls = PythonTrace
trace_obj = cls.objects.create(**kwargs)
for i, section in enumerate(trace["sections"]):
if section["label"] == "Entry":
label = TraceSection.ENTRY
elif section["label"] == "Preamble":
label = TraceSection.PREAMBLE
elif section["label"] == "Loop body":
label = TraceSection.LOOP_BODY
section_obj = trace_obj.sections.create(ordering=i, label=label)
for i, chunk in enumerate(section["chunks"]):
kwargs = {
"section": section_obj,
"ordering": i,
}
if chunk["type"] == "resop":
cls = ResOpChunk
kwargs["raw_source"] = chunk["ops"]
elif chunk["type"] == "python":
cls = PythonChunk
kwargs["raw_source"] = chunk["source"]
assert sorted(chunk["linenos"]) == chunk["linenos"]
kwargs["start_line"] = chunk["linenos"][0]
kwargs["end_line"] = chunk["linenos"][-1] + 1
cls.objects.create(**kwargs)
_add_calls(log, data.get("calls"))
return redirect(log)
def _add_calls(log, calls, parent=None):
if calls is None:
return
if parent is None:
depth = 0
else:
depth = parent.call_depth + 1
# These are calls which can be grouped together into a single insert.
no_children_calls = []
for call in calls:
kwargs = {
"start_time": call["start_time"],
"end_time": call["end_time"],
"call_depth": depth,
"name": call["name"],
"parent": parent,
"log": log,
}
if call["subcalls"]:
inst = Call.objects.create(**kwargs)
_add_calls(log, call["subcalls"], parent=inst)
else:
no_children_calls.append(Call(**kwargs))
if no_children_calls:
Call.objects.bulk_create(no_children_calls)
def trace_compiled_list(request, id):
log = get_object_or_404(Log, id=id)
return render(request, "traces/trace/compiled_list.html", {
"page": "compiled",
"log": log,
})
def trace_timeline(request, id):
log = get_object_or_404(Log, id=id)
return render(request, "traces/trace/timeline.html", {
"page": "timeline",
"log": log,
})
def trace_compiled_detail(request, id, compiled_id):
log = get_object_or_404(Log, id=id)
trace = get_object_or_404(log.traces.all(), id=compiled_id)
return render(request, "traces/trace/compiled_detail.html", {
"page": "compiled",
"log": log,
"trace": trace,
})
CallNode = namedtuple("CallNode", ["name", "start_time", "end_time", "depth", "color"])
SATURATION = .8
VALUE = .8
def generate_colors():
# Code from Marty Alchin
for i in itertools.count():
h = i * .15
s = .3 + ((i * .15) % .7)
v = .3 + (((i + 3) * .15) % .7)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
yield int(r * 255), int(g * 255), int(b * 255)
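# For example (illustrative), the first yielded color is
# hsv_to_rgb(0.0, 0.3, 0.75) scaled to 8-bit values: (191, 133, 133).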
def compute_pixels(node, slice_start, slice_end):
slice_width = slice_end - slice_start
return 675 * ((node.end_time - node.start_time) / slice_width)
def merge_nodes(node1, node2):
names = node1.name | node2.name
start_time = min(node1.start_time, node2.start_time)
    end_time = max(node1.end_time, node2.end_time)
assert node1.depth == node2.depth
node1_weight = (node1.end_time - node1.start_time) / (end_time - start_time)
node2_weight = (node2.end_time - node2.start_time) / (end_time - start_time)
r = node1.color[0] * node1_weight + node2.color[0] * node2_weight
g = node1.color[1] * node1_weight + node2.color[1] * node2_weight
b = node1.color[2] * node1_weight + node2.color[2] * node2_weight
return CallNode(names, start_time, end_time, node1.depth, (int(r), int(g), int(b)))
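# Worked example (illustrative): merging node1 spanning [0, 1] with node2
# spanning [1, 4] gives a merged span of [0, 4], weights 0.25 and 0.75, and
# a merged color of 0.25*color1 + 0.75*color2, component-wise.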
def trace_timeline_call_data(request, id):
log = get_object_or_404(Log, id=id)
start_percent = float(request.GET.get("start_percent", 0))
end_percent = float(request.GET.get("end_percent", 1))
absolute_start_end = log.calls.aggregate(Min("start_time"), Max("end_time"))
absolute_start = absolute_start_end["start_time__min"]
absolute_end = absolute_start_end["end_time__max"]
filters = {
"end_time__gte": absolute_start + (start_percent * (absolute_end - absolute_start)),
"start_time__lte": absolute_start + (end_percent * (absolute_end - absolute_start)),
}
data = defaultdict(list)
calls = log.calls.filter(**filters)
color_generator = generate_colors()
known_colors = {}
for call in calls.iterator():
if call.name not in known_colors:
known_colors[call.name] = color_generator.next()
data[call.call_depth].append(CallNode(
{call.name}, call.start_time, call.end_time, call.call_depth,
known_colors[call.name]
))
slice_start = float("inf")
slice_end = float("-inf")
for calls in data.itervalues():
slice_start = min(slice_start, min(map(attrgetter("start_time"), calls)))
slice_end = max(slice_end, max(map(attrgetter("end_time"), calls)))
for depth, calls in data.items():
new_calls = []
iterator = iter(sorted(calls, key=attrgetter("start_time")))
for node in iterator:
while compute_pixels(node, slice_start, slice_end) < 2:
try:
next_node = iterator.next()
except StopIteration:
break
node = merge_nodes(node, next_node)
new_calls.append(node)
data[depth] = new_calls
return JSONResponse([
{
"name": ", ".join(node.name),
"start_time": node.start_time,
"end_time": node.end_time,
"depth": node.depth,
"color": "#%02X%02X%02X" % node.color,
}
        for nodes in data.itervalues() for node in nodes
])
def trace_call_data(request, id):
log = get_object_or_404(Log, id=id)
call = get_object_or_404(log.calls.all(), id=request.GET["call_id"])
call_time = call.end_time - call.start_time
subcall_times = call.subcalls.aggregate(
total_start_time=Sum("start_time"),
total_end_time=Sum("end_time")
)
call_total_subcall_start_time = subcall_times["total_start_time"] or 0
call_total_subcall_end_time = subcall_times["total_end_time"] or 0
call_subcall_time = call_total_subcall_end_time - call_total_subcall_start_time
call_exclusive_time = call_time - call_subcall_time
func_times = log.calls.filter(name=call.name).aggregate(
total_start_time=Sum("start_time"),
total_end_time=Sum("end_time"),
)
func_total_start_time = func_times["total_start_time"] or 0
func_total_end_time = func_times["total_end_time"] or 0
func_time = func_total_end_time - func_total_start_time
func_subcall_times = log.calls.filter(name=call.name).aggregate(
total_subcalls_start_time=Sum("subcalls__start_time"),
total_subcalls_end_time=Sum("subcalls__end_time"),
)
func_total_subcall_start_time = func_subcall_times["total_subcalls_start_time"] or 0
func_total_subcall_end_time = func_subcall_times["total_subcalls_end_time"] or 0
func_subcalls_time = func_total_subcall_end_time - func_total_subcall_start_time
func_exclusive_time = func_time - func_subcalls_time
data = {
"call_time": call_time,
"call_exclusive_time": call_exclusive_time,
"func_time": func_time,
"func_exclusive_time": func_exclusive_time,
}
return JSONResponse(data)
|
|
from sympy import (limit, exp, oo, log, sqrt, Limit, sin, floor, cos, ceiling,
                   atan, gamma, Symbol, S, pi, Integral, cot, Rational, I, zoo,
                   tan, integrate, Sum)
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.iterables import cartes
def test_basic1():
assert limit(x, x, oo) == oo
assert limit(x, x, -oo) == -oo
assert limit(-x, x, oo) == -oo
assert limit(x**2, x, -oo) == oo
assert limit(-x**2, x, oo) == -oo
assert limit(x*log(x), x, 0, dir="+") == 0
assert limit(1/x, x, oo) == 0
assert limit(exp(x), x, oo) == oo
assert limit(-exp(x), x, oo) == -oo
assert limit(exp(x)/x, x, oo) == oo
assert limit(1/x - exp(-x), x, oo) == 0
assert limit(x + 1/x, x, oo) == oo
assert limit(x - x**2, x, oo) == -oo
assert limit((1 + x)**(1 + sqrt(2)),x,0) == 1
assert limit((1 + x)**oo, x, 0) == oo
assert limit((1 + x)**oo, x, 0, dir='-') == 0
assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
assert limit(y/x/log(x), x, 0) == -y*oo
assert limit(cos(x + y)/x, x, 0) == cos(y)*oo
raises(NotImplementedError, 'limit(Sum(1/x, (x, 1, y)) - log(y), y, oo)')
assert limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo) == Sum(1/x, (x, 1, oo))
assert limit(gamma(1/x + 3), x, oo) == 2
# approaching 0
# from dir="+"
assert limit(1 + 1/x, x, 0) == oo
# from dir='-'
# Add
assert limit(1 + 1/x, x, 0, dir='-') == -oo
# Pow
assert limit(x**(-2), x, 0, dir='-') == oo
assert limit(x**(-3), x, 0, dir='-') == -oo
assert limit(x**(-Rational(1, 2)), x, 0, dir='-') == (-oo)*I
assert limit(x**2, x, 0, dir='-') == 0
assert limit(x**(Rational(1, 2)), x, 0, dir='-') == 0
assert limit(x**-pi, x, 0, dir='-') == zoo
assert limit((1 + cos(x))**oo, x, 0) == oo
def test_basic2():
assert limit(x**x, x, 0, dir="+") == 1
assert limit((exp(x)-1)/x, x, 0) == 1
assert limit(1 + 1/x, x, oo) == 1
assert limit(-exp(1/x), x, oo) == -1
assert limit(x + exp(-x), x, oo) == oo
assert limit(x + exp(-x**2), x, oo) == oo
assert limit(x + exp(-exp(x)), x, oo) == oo
assert limit(13 + 1/x - exp(-x), x, oo) == 13
def test_basic3():
assert limit(1/x, x, 0, dir="+") == oo
assert limit(1/x, x, 0, dir="-") == -oo
def test_basic4():
assert limit(2*x + y*x, x, 0) == 0
assert limit(2*x + y*x, x, 1) == 2+y
assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
assert limit(sqrt(x + 1) - sqrt(x), x, oo)==0
assert integrate(1/(x**3+1),(x,0,oo)) == 2*pi*sqrt(3)/9
def test_issue786():
assert limit(x*y + x*z, z, 2) == x*y+2*x
def test_Limit():
assert Limit(sin(x)/x, x, 0) != 1
assert Limit(sin(x)/x, x, 0).doit() == 1
def test_floor():
assert limit(floor(x), x, -2, "+") == -2
assert limit(floor(x), x, -2, "-") == -3
assert limit(floor(x), x, -1, "+") == -1
assert limit(floor(x), x, -1, "-") == -2
assert limit(floor(x), x, 0, "+") == 0
assert limit(floor(x), x, 0, "-") == -1
assert limit(floor(x), x, 1, "+") == 1
assert limit(floor(x), x, 1, "-") == 0
assert limit(floor(x), x, 2, "+") == 2
assert limit(floor(x), x, 2, "-") == 1
assert limit(floor(x), x, 248, "+") == 248
assert limit(floor(x), x, 248, "-") == 247
# note: if any of the tests below fails, just comment it out. General fix
# needs better assumptions handling.
# this doesn't work, it requires robust assumptions:
assert limit(floor(sin(x)), x, 0, "+") == 0
assert limit(floor(sin(x)), x, 0, "-") == -1
assert limit(floor(cos(x)), x, 0, "+") == 0
assert limit(floor(cos(x)), x, 0, "-") == 0
# this doesn't work, it requires robust assumptions:
assert limit(floor(5+sin(x)), x, 0, "+") == 5
#assert limit(floor(5+sin(x)), x, 0, "-") == 4
#assert limit(floor(5+cos(x)), x, 0, "+") == 5
#assert limit(floor(5+cos(x)), x, 0, "-") == 5
def test_ceiling():
assert limit(ceiling(x), x, -2, "+") == -1
assert limit(ceiling(x), x, -2, "-") == -2
assert limit(ceiling(x), x, -1, "+") == 0
assert limit(ceiling(x), x, -1, "-") == -1
assert limit(ceiling(x), x, 0, "+") == 1
assert limit(ceiling(x), x, 0, "-") == 0
assert limit(ceiling(x), x, 1, "+") == 2
assert limit(ceiling(x), x, 1, "-") == 1
assert limit(ceiling(x), x, 2, "+") == 3
assert limit(ceiling(x), x, 2, "-") == 2
assert limit(ceiling(x), x, 248, "+") == 249
assert limit(ceiling(x), x, 248, "-") == 248
# note: if any of the tests below fails, just comment it out. General fix
# needs better assumptions handling.
# this doesn't work, it requires robust assumptions:
#assert limit(ceiling(sin(x)), x, 0, "+") == 1
assert limit(ceiling(sin(x)), x, 0, "-") == 0
assert limit(ceiling(cos(x)), x, 0, "+") == 1
assert limit(ceiling(cos(x)), x, 0, "-") == 1
# this doesn't work, it requires robust assumptions:
#assert limit(ceiling(5+sin(x)), x, 0, "+") == 6
assert limit(ceiling(5+sin(x)), x, 0, "-") == 5
assert limit(ceiling(5+cos(x)), x, 0, "+") == 6
assert limit(ceiling(5+cos(x)), x, 0, "-") == 6
def test_atan():
x = Symbol("x", real=True)
assert limit(atan(x)*sin(1/x), x, 0) == 0
assert limit(atan(x) + sqrt(x+1) - sqrt(x), x, oo) == pi/2
def test_abs():
assert limit(abs(x), x, 0) == 0
assert limit(abs(sin(x)), x, 0) == 0
assert limit(abs(cos(x)), x, 0) == 1
assert limit(abs(sin(x+1)), x, 0) == sin(1)
def test_heuristic():
x = Symbol("x", real=True)
assert limit(log(2+sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)
def test_issue772():
z = Symbol("z", positive=True)
f = -1/z*exp(-z*x)
assert limit(f, x, oo) == 0
assert f.limit(x, oo) == 0
def test_exponential():
n = Symbol('n')
assert limit((1+x/n)**n,n,oo) == exp(x)
assert limit((1+x/(2*n))**n,n,oo) == exp(x/2)
assert limit((1+x/(2*n+1))**n,n,oo) == exp(x/2)
assert limit(((x-1)/(x+1))**x,x,oo) == exp(-2)
@XFAIL
def test_exponential2():
n = Symbol('n')
assert limit((1+x/(n+sin(n)))**n,n,oo) == exp(x)
def test_doit():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
assert l.doit() == oo
@XFAIL
def test_doit2():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
# limit() breaks on the contained Integral.
assert l.doit(deep = False) == l
def test_bug693a():
assert sin(sin(x+1)+1).limit(x,0) == sin(sin(1)+1)
def test_issue693():
assert limit( (1-cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
assert limit(sin(sin(x+1)+1), x, 0) == sin(1 + sin(1))
assert limit(abs(sin(x+1)+1), x, 0) == 1 + sin(1)
def test_issue991():
assert limit(1/(x+3), x, 2) == S(1)/5
assert limit(1/(x+pi), x, 2) == S(1)/(2+pi)
assert limit(log(x)/(x**2+3), x, 2) == log(2)/7
assert limit(log(x)/(x**2+pi), x, 2) == log(2)/(4+pi)
def test_issue1448():
assert limit(cot(x),x,0,dir='+') == oo
assert limit(cot(x),x,pi/2,dir='+') == 0
def test_issue2065():
assert limit(x**0.5, x, oo) == oo**0.5 == oo
assert limit(x**0.5, x, 16) == S(16)**0.5
assert limit(x**0.5, x, 0) == 0
assert limit(x**(-0.5), x, oo) == 0
assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)
def test_issue2084():
# using list(...) so py.test can recalculate values
tests = list(cartes([x, -x],
[-1, 1],
[2, 3, Rational(1, 2), Rational(2, 3)],
['-', '+']))
results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
0, 0, 0, 0, 0, 0, 0, 0,
oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
0, 0, 0, 0, 0, 0, 0, 0)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
y, s, e, d = args
eq=y**(s*e)
try:
assert limit(eq, x, 0, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print
print i, res, eq, d, limit(eq, x, 0, dir=d)
else:
assert None
def test_issue2085():
assert limit(sin(x)/x, x, oo) == 0
assert limit(atan(x), x, oo) == pi/2
assert limit(gamma(x), x, oo) == oo
assert limit(cos(x)/x, x, oo) == 0
assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
@XFAIL
def test_issue2130():
assert limit((1+y)**(1/y) - S.Exp1, y, 0) == 0
def test_issue1447():
# using list(...) so py.test can recalculate values
from sympy import sign
tests = list(cartes([cot, tan],
[-pi/2, 0, pi/2, pi, 3*pi/2],
['-', '+']))
results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
f, l, d= args
eq=f(x)
try:
assert limit(eq, x, l, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print
print i, res, eq, l, d, limit(eq, x, l, dir=d)
else:
assert None
@XFAIL
def test_issue835():
assert limit((1 + x**log(3))**(1/x), x, 0) == 1
assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5
|
|
"""Reorder the declarations in a header so that all dependencies
are met.
Author: Peter Goodman (peter.goodman@gmail.com)
Copyright: Copyright 2012-2013 Peter Goodman, all rights reserved.
"""
import sys
import collections
from cparser import *
parser = CParser()
seen = set()
COMPOUNDS = set()
CHANGED = False
# ORDER_DEFAULTS = {
# CTypeUse: 0.0,
# CTypeEnum: 0.4,
# CTypeFunction: 0.9,
# CTypeAttributed: 0.0,
# CTypeExpression: 0.3,
# CTypeBitfield: 0.0,
# CTypeArray: 0.4,
# CTypePointer: 0.0,
# CTypeDefinition: 0.2,
# CTypeBuiltIn: 0.0,
# CTypeUnion: 0.5,
# CTypeStruct: 0.5,
# }
# class OrderDict(object):
# def __init__(self):
# self.orders = {}
# def __getitem__(self, key):
# if key not in self.orders:
# ctype, is_value, need_value = key
# self.orders[key] = ORDER_DEFAULTS[ctype.__class__]
# return self.orders[key]
# def __setitem__(self, key, val):
# self.orders[key] = val
# return val
# order_numbers = OrderDict()
def O(*args):
print "".join(map(str, args))
# def visit_expression(ctype, expr, order_num):
# global parser
# for ref_ctype in expr.parse_types(parser):
# if ctype.base_type() is ref_ctype.base_type():
# continue
# order_num = max(
# order_num,
# 1 + visit_ctype(ref_ctype, True, True))
# return order_num
# def visit_enum(ctype, is_value, need_value, order_num):
# for expr in ctype.fields.values():
# order_num = max(
# order_num, visit_expression(ctype, expr, order_num))
# order_numbers[ctype, is_value, need_value] = order_num
# return order_num
# def visit_function(ctype, is_value, need_value, order_num):
# order_num = max(
# 1 + visit_ctype(ctype.ret_type, True, True),
# order_num)
# for param_ctype in ctype.param_types:
# if param_ctype:
# order_num = max(
# 1 + visit_ctype(param_ctype, True, True),
# order_num)
# return order_num
# def visit_attributed(ctype, is_value, need_value, order_num):
# return visit_ctype(ctype.ctype, is_value, need_value)
# def visit_typeof(ctype, is_value, need_value, order_num):
# return visit_expression(CType(), ctype.expr, order_num)
# def visit_bitfield(*args):
# # todo: types in its expression
# return visit_array(*args)
# def visit_array(ctype, is_value, need_value, order_num):
# return max(
# order_num,
# visit_ctype(ctype.ctype, is_value, need_value))
# def visit_pointer(ctype, is_value, need_value, order_num):
# intern_type = ctype.ctype.unattributed_type()
# if isinstance(intern_type, CTypeUse):
# return max(order_num, visit_ctype(ctype.ctype, False, False))
# else:
# return max(order_num, visit_ctype(ctype.ctype, True, True))
# def visit_typedef(ctype, is_value, need_value, order_num):
# orig_id = (ctype, is_value, need_value)
# intern_ctype = ctype.ctype.unattributed_type()
# # if it is a value, and it's just referring to a used type, then
# # make it appear not to be a value.
# if is_value:
# is_value = not isinstance(intern_ctype, CTypeUse)
# need_value = need_value or is_value
# # if is is not a value, but it is referring to an in-line defined
# # type then make it appear to be a value.
# elif not isinstance(intern_ctype, CTypeUse):
# is_value = True
# if not is_value and need_value:
# is_value = True
# base_type = intern_ctype.base_type()
# if isinstance(base_type, CTypeBuiltIn):
# is_value = False
# need_value = False
# order_numbers[orig_id] = 0
# return 0
# return max(
# order_num,
# 1 + visit_ctype(ctype.ctype, is_value, need_value))
# def visit_use(ctype, is_value, need_value, order_num):
# return visit_ctype(ctype.ctype, is_value, need_value)
# def visit_builtin(*args):
# return 1
# def visit_union(*args):
# return visit_struct(*args)
# def visit_struct(ctype, is_value, need_value, order_num):
# global order_numbers, COMPOUNDS
# COMPOUNDS.add(ctype)
# if not is_value:
# return 0
# for field_ctype, field_name in ctype.fields():
# order_num = max(order_num, 1 + visit_ctype(field_ctype, is_value, True))
# return order_num
# VISITORS = {
# CTypeUse: visit_use,
# CTypeEnum: visit_enum,
# CTypeFunction: visit_function,
# CTypeAttributed: visit_attributed,
# CTypeExpression: visit_typeof,
# CTypeBitfield: visit_bitfield,
# CTypeArray: visit_array,
# CTypePointer: visit_pointer,
# CTypeDefinition: visit_typedef,
# CTypeBuiltIn: visit_builtin,
# CTypeUnion: visit_union,
# CTypeStruct: visit_struct,
# }
# TAB = ""
# def visit_ctype(ctype, is_value, need_value):
# global order_numbers, seen, CHANGED
# seen_id = (ctype, is_value, need_value)
# order_num = order_numbers[seen_id]
# old_order_num = order_num
# if seen_id not in seen:
# seen.add(seen_id)
# order_num = max(
# old_order_num,
# VISITORS[ctype.__class__](
# ctype, is_value, need_value, old_order_num))
# if order_num > old_order_num:
# CHANGED = True
# order_numbers[seen_id] = order_num
# return order_num
# Returns True iff this unit should be included in the file's
# output. This mainly looks to filter out variables.
def should_include_unit(unit_decls, unit_toks, is_typedef):
global parser
if is_typedef:
return True
for ctype, name in unit_decls:
if name:
base_type = ctype.base_type()
# Variable
if not isinstance(base_type, CTypeFunction):
return False
# Don't include functions returning floating point values.
else:
base_return_type = base_type.ret_type.base_type()
if isinstance(base_return_type, CTypeBuiltIn) \
and base_return_type.is_float:
return False
# Looks like a function definition.
if "}" == unit_toks[-1].str:
return False
# Try not to include forward definitions of enums.
base_ctype = ctype.base_type()
if not isinstance(base_ctype, CTypeEnum):
continue
#if base_ctype.original_name == "ip_conntrack_infoip_conntrack_info":
# print unit_toks
# exit(ip_conntrack_infoip_conntrack_info)
is_forward_decl = True
for tok in unit_toks:
if "{" == tok.str:
is_forward_decl = False
break
if is_forward_decl:
return False
return True
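# For example (illustrative): a variable declaration like `int x;` is
# excluded, a float-returning function like `double f(void);` is excluded,
# and typedefs are always included.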
# Look for duplicate global enumerator constants and remove enums
# with duplicate constants. This is a simplistic solution.
#
# This is mostly to address trivial cases of the following three
# specific problems:
# i) typedef struct foo { ... } foo;
# ii) enum { X }; ... enum { X };
# iii) struct foo { }; ... struct foo { };
def process_redundant_decls(units):
units = list(units)
enum_constants = set()
typedefs = set()
compounds = {
CTypeUnion: set(),
CTypeStruct: set()
}
open_comment = CToken("/*", CToken.COMMENT)
close_comment = CToken("*/", CToken.COMMENT)
T = 0
for unit_decls, unit_toks, is_typedef in units:
for ctype, name in unit_decls:
ctype = ctype.unattributed_type()
base_ctype = ctype.base_type()
# look for duplicate definitions of enumerator constants
# and delete one of the enums (containing the duplicate
# constant).
if isinstance(base_ctype, CTypeEnum):
if ctype.is_type_use():
continue
# the union has no name; delete it. This helps us avoid
# issues in the kernel where an enumerator constant's value
# is dependent on the return from an inline function, which
# itself is stripped from the types.
#
# we just hope that we haven't deleted an enum where one of
# the deleted enumerator constants is referenced elsewhere.
#if not base_ctype.has_name:
# assert len(unit_decls) == 1
# #unit_toks.insert(0, open_comment)
# #unit_toks.append(close_comment)
# del unit_toks[:]
# break
for name in base_ctype.field_list:
if name in enum_constants:
assert len(unit_decls) == 1
#unit_toks.insert(0, open_comment)
#unit_toks.append(close_comment)
del unit_toks[:]
break
else:
enum_constants.add(name)
# look for duplicate definitions of structs / unions
# and delete the most recently found type.
elif isinstance(base_ctype, CTypeStruct) \
or isinstance(base_ctype, CTypeUnion):
handled = False
while not ctype.is_type_use():
                    # try to distinguish a forward declaration from the real
                    # definition. The types of the two things will be resolved
                    # to the same type, so we look at the token pattern instead.
if ";" == unit_toks[-1].str \
and CToken.TYPE_USER == unit_toks[-2].kind \
and CToken.TYPE_SPECIFIER == unit_toks[-3].kind:
break
names = compounds[base_ctype.__class__]
if base_ctype.internal_name in names:
assert len(unit_decls) == 1
#unit_toks.insert(0, open_comment)
#unit_toks.append(close_comment)
del unit_toks[:]
handled = True
else:
names.add(base_ctype.internal_name)
break
# no naming conflict
if handled or name != base_ctype.original_name:
continue
# hard case, we need to preserve the struct/union
# definition; we will try to do this by just renaming
# the typedef'd name.
if not ctype.is_type_use():
#print name, base_ctype.original_name, unit_toks
#assert CToken.TYPE_USER == unit_toks[-2].kind
unit_toks[-2].str += str(T)
T += 1
continue
# easy case, delete the typedef: because it's used in
# C++, it means the compiler will resolve the correct
# type.
else:
#unit_toks.insert(0, open_comment)
#unit_toks.append(close_comment)
del unit_toks[:]
continue
elif isinstance(ctype, CTypeDefinition):
if ctype.name in typedefs or ctype.name == "wchar_t":
del unit_toks[:]
else:
typedefs.add(ctype.name)
# Remove __attribute__ ( ... ) forms from functions.
#
# Args:
# toks: A list of tokens representing a function
# declaration and which contains zero-or-more
# function attributes.
#
# Returns:
# A list of tokens without any function attributes.
def remove_function_attributes(decls, toks):
for (ctype, _) in decls:
if not isinstance(ctype.base_type(), CTypeFunction):
return toks
new_toks = []
count_parens = False
paren_count = 0
i = 0
for tok in toks:
i += 1
if count_parens:
if "(" == tok.str:
paren_count += 1
elif ")" == tok.str:
paren_count -= 1
count_parens = paren_count > 0
continue
if tok.str in ("__attribute__", "attribute__", "__attribute", "declspec"):
count_parens = "(" == toks[i].str
paren_count = 0
continue
new_toks.append(tok)
return new_toks
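# For example (illustrative), the tokens for
#     __attribute__((noreturn)) void die(void);
# come back as
#     void die(void);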
# Remove `extern "C"` from `toks`.
def remove_extern_c(toks):
new_toks = []
i = 0
while i < len(toks):
tok = toks[i]
i += 1
if tok.str == "extern":
if i < len(toks):
if toks[i].str.upper() == '"C"':
i += 1
continue
new_toks.append(tok)
return new_toks
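# For example (illustrative): the tokens for `extern "C" int f(void);`
# come back as `int f(void);`, while a plain `extern int g;` is kept intact.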
ENUM_ID = 0
# Add `: int` into the enum definition just before the opening `{`.
def add_int_storage_to_enum(toks):
global ENUM_ID
i = 0
found = False
while i < len(toks):
if toks[i].str == "{":
found = True
break
i += 1
if found:
toks.insert(i, CToken('int', CToken.TYPE_BUILT_IN))
toks.insert(i, CToken(':', CToken.OPERATOR))
if i == 1: # Unnamed enum, add in a name
toks.insert(i, CToken('anon_reorder_enum_{}'.format(ENUM_ID), CToken.TYPE_USER))
ENUM_ID += 1
return toks
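# For example (illustrative): the tokens for `enum foo { A, B }` become
# `enum foo : int { A, B }`, and an unnamed `enum { A }` additionally gains
# a generated `anon_reorder_enum_<N>` name.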
# Process the units of C type, function, and variable declarations
# as generated by the CParser.
#
# Outputs:
# Re-ordered declarations where ordering is determined by type
# dependencies.
def process_units(units):
#global order_numbers, seen, CHANGED
decls = []
# Initialise the order numbers
unit_num = 0
units = list(units)
for unit_decls, _, _ in units:
decls.extend(unit_decls)
# # Refine until we have an ordering.
# CHANGED = True
# while CHANGED:
# CHANGED = False
# for ctype, name in decls:
# old_order_num = order_numbers[ctype, True, False]
# new_order_num = visit_ctype(ctype, True, False)
# seen.clear()
# Emit the ordered units.
#new_toks = collections.defaultdict(list)
forward_typedefs = []
lines = []
for unit_decls, unit_toks, is_typedef in units:
if not should_include_unit(unit_decls, unit_toks, is_typedef):
continue
# Remove attributes from non-type declarations.
unit_toks = remove_function_attributes(unit_decls, unit_toks)
# Remove redundant `extern "C"`s from the code.
unit_toks = remove_extern_c(unit_toks)
for ctype, name in unit_decls:
if isinstance(ctype, (CTypeStruct, CTypeUnion)):
O(ctype.name, ";")
#elif isinstance(ctype, CTypeEnum):
# O(ctype.name, " : int;");
# unit_toks = add_int_storage_to_enum(unit_toks)
lines.append(unit_toks)
for line in lines:
O(" ".join(t.str for t in line))
# # Get a canonical order number for this unit that accounts
# # for the ordering of any types defined within the unit.
# max_order_num = 0
# has_name = False
# for ctype, name in unit_decls:
# if name and not is_typedef:
# has_name = True
# max_order_num = max(
# max_order_num, order_numbers[ctype, True, False])
# if has_name:
# max_order_num = sys.maxint
# # Add the tokens for this unit of declarations into the
# # total ordering.
# new_toks[max_order_num].append("\n")
# new_toks[max_order_num].extend(t.str for t in unit_toks)
# toks = []
# for i in sorted(new_toks.keys()):
# toks.extend(new_toks[i])
# # output forward declarations for structs and unions
# global COMPOUNDS
# for ctype in COMPOUNDS:
# O(ctype.name, ";")
# # output the tokens
# buff = " ".join(toks).split("\n")
# for line in buff:
# O(line.strip(" \n"))
if "__main__" == __name__:
import sys
macro_defs, source_lines = [], []
with open(sys.argv[1]) as lines_:
for line in lines_:
if line.startswith("#"):
macro_defs.append(line)
else:
source_lines.append(line)
tokens = CTokenizer(source_lines)
units = parser.parse_units(tokens)
process_redundant_decls(units)
O("".join(macro_defs))
process_units(units)
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for binaries built around predictor.
To use: subclass PredictorRunnerBase, implement the InputGenerator and RunBatch
functions, and call Run().
To run on TPU, set:
--device_type=tpu
--xla_device=tpu
--tf_master=url/to/tpu/server
--inference_threads=num_tpu_cores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import concurrent.futures
import itertools
import os
import re
import threading
import time
from absl import flags
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import predictor
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
import six
from six.moves import range
flags.DEFINE_string(
'checkpoint', None, 'Either a checkpoint file to load,'
' or a directory containing multiple checkpoints, where'
' the latest checkpoint will be loaded.')
flags.DEFINE_string(
'inference_graph', None, 'Path to an inference graph. '
'If not specified, will be inferred from the checkpoint path.')
flags.DEFINE_string(
'inference_subgraph_name', '', 'The name of the inference subgraph to use. '
'Defaults to the default subgraph.')
flags.DEFINE_enum('device_type', 'gpu', ['cpu', 'gpu', 'tpu'], 'Device type.')
flags.DEFINE_string('tf_master', 'local', 'tf_master for predictor session.')
flags.DEFINE_integer('inference_threads', 1, 'Number of inference threads.')
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer(
'prediction_step_interval', 3000, 'Number of steps between outputs. '
'Only meaningful if FLAGS.checkpoint is a directory.')
flags.DEFINE_integer('max_inputs', 0, 'Only process the first n inputs.')
flags.DEFINE_list(
'input_id_filter', [],
'If not empty, only process the input ids in the given list.')
flags.DEFINE_string(
'output_dir', None, 'Output directory. '
'If FLAGS.checkpoint is a directory, a subdirectory will be created '
'for each checkpoint evaluated.')
flags.DEFINE_integer(
'output_num_shards', 1, 'Each replica generates one shard of output '
'according to --output_shard_id.')
flags.DEFINE_integer(
'output_shard_id', 0,
'The output shard id in range [0, output_num_shards - 1].')
FLAGS = flags.FLAGS
_RETRY_SLEEP_SECONDS = 10
class PredictorRunnerBase(object):
"""Manages state for running predictor.
Abstract class. Subclasses should override `InputGenerator` and `RunBatch`.
Call `Subclass().Run()` in `main()` function to run.
"""
def __init__(self,
checkpoint,
output_dir=None,
inference_graph=None,
inference_subgraph_name='',
device_type='cpu',
output_num_shards=1,
output_shard_id=0,
max_inputs=0,
input_id_filter=None,
tf_master='local',
inference_threads=1,
batch_size=64,
prediction_step_interval=3000):
"""Constructor.
Args:
checkpoint: Either a checkpoint file to load, or a directory containing
multiple checkpoints, where the latest checkpoint will be loaded.
output_dir: Output directory. If `checkpoint` is a directory, a
subdirectory will be created for each checkpoint evaluated.
inference_graph: Path to an inference graph. If not specified, will be
inferred from the checkpoint path.
inference_subgraph_name: The name of the inference subgraph to use.
Defaults to the default subgraph.
device_type: Device type, either cpu, gpu, or tpu.
output_num_shards: Each replica generates one shard of output according to
`output_shard_id`.
output_shard_id: The output shard id in range `[0, output_num_shards -
1]`.
max_inputs: Only process the first n inputs. 0 means process all inputs.
input_id_filter: If not empty, only process the input ids in the given
list.
tf_master: tf_master for predictor session.
inference_threads: Number of inference threads.
batch_size: Batch size.
prediction_step_interval: Number of steps between outputs. Only meaningful
if `checkpoint` is a directory.
"""
self._checkpoint = checkpoint
self._output_dir = output_dir
self._output_num_shards = output_num_shards
self._output_shard_id = output_shard_id
self._max_inputs = max_inputs
input_id_filter = input_id_filter or []
self._input_id_filter = [str(x) for x in input_id_filter]
self._batch_size = batch_size
self._prediction_step_interval = prediction_step_interval
if device_type == 'tpu' and FLAGS.xla_device != 'tpu':
raise ValueError('xla_device=tpu should be set with device_type=tpu!')
if tf.io.gfile.isdir(self._checkpoint):
initial_checkpoint = tf.train.latest_checkpoint(self._checkpoint)
while (not initial_checkpoint or
not tf.io.gfile.exists(initial_checkpoint + '.index')):
tf.logging.log_first_n(tf.logging.INFO,
'Waiting for checkpoint to be available.',
10)
time.sleep(_RETRY_SLEEP_SECONDS)
initial_checkpoint = tf.train.latest_checkpoint(self._checkpoint)
else:
initial_checkpoint = self._checkpoint
if not tf.io.gfile.exists(initial_checkpoint + '.index'):
raise ValueError('Could not find checkpoint %s' % initial_checkpoint)
# Use saved inference graph.
if inference_graph:
self._inference_graph = inference_graph
else:
checkpoint_dir = self._checkpoint
if not tf.io.gfile.isdir(checkpoint_dir):
checkpoint_dir = os.path.dirname(checkpoint_dir)
logdir = os.path.dirname(checkpoint_dir)
inference_graph_filename = 'inference.pbtxt'
if device_type == 'tpu':
inference_graph_filename = 'inference_tpu.pbtxt'
self._inference_graph = os.path.join(logdir, 'inference_graphs',
inference_graph_filename)
self._predictor = predictor.Predictor(
inference_graph=self._inference_graph,
subgraph_name=inference_subgraph_name,
checkpoint=initial_checkpoint,
device_type=device_type,
tf_master=tf_master)
self._threadpool = concurrent.futures.ThreadPoolExecutor(inference_threads)
self._locks = [threading.Lock() for _ in range(inference_threads)]
@classmethod
def FromFlags(cls, **kwargs):
"""Constructs an instance of this class from FLAGS."""
return cls(
checkpoint=FLAGS.checkpoint,
output_dir=FLAGS.output_dir,
inference_graph=FLAGS.inference_graph,
inference_subgraph_name=FLAGS.inference_subgraph_name,
device_type=FLAGS.device_type,
output_num_shards=FLAGS.output_num_shards,
output_shard_id=FLAGS.output_shard_id,
max_inputs=FLAGS.max_inputs,
input_id_filter=FLAGS.input_id_filter,
tf_master=FLAGS.tf_master,
inference_threads=FLAGS.inference_threads,
batch_size=FLAGS.batch_size,
prediction_step_interval=FLAGS.prediction_step_interval,
**kwargs)
def _ShouldProcessInputId(self, input_id):
if self._max_inputs > 0 and input_id >= self._max_inputs:
return False
if self._input_id_filter and str(input_id) not in self._input_id_filter:
return False
return input_id % self._output_num_shards == self._output_shard_id
def _OutputFilename(self, output_dir, name):
assert self._output_shard_id >= 0
assert self._output_shard_id < self._output_num_shards
return '%s-%.5d-of-%.5d' % (os.path.join(
output_dir, name), self._output_shard_id, self._output_num_shards)
def InputGenerator(self):
"""Generator that yields the next input.
Must yield in a deterministic order or raise an exception when
self._output_num_shards > 1.
"""
raise NotImplementedError('Abstract method.')
def RunBatch(self, output_dir, batch):
"""Runs predictor on a single batch of data.
Args:
output_dir: the output directory.
batch: a list of (input_id, element) pairs, where element is yielded from
InputGenerator and input_id is a unique counter starting from 0.
"""
raise NotImplementedError('Abstract method.')
def _PredictOneCheckpoint(self, checkpoint, output_dir):
"""Runs predictor."""
tf.logging.info('Processing checkpoint %s.', checkpoint)
self._predictor.Load(checkpoint)
def LockedRunBatch(batch, batch_id):
"""TPU inference runs the i-th batch on the i%num_cores-th core.
Make sure that core is available before scheduling the next batch on it.
Args:
batch: The input to be passed to RunBatch.
        batch_id: The id of this batch, which determines which core it runs on.
"""
with self._locks[batch_id % len(self._locks)]:
self.RunBatch(output_dir, batch)
batch_id = 0
batch = []
futures = []
# Iterate through the input and process it one batch at a time.
it = self.InputGenerator()
if self._max_inputs > 0:
it = itertools.islice(it, self._max_inputs)
for next_id, element in enumerate(it):
if self._ShouldProcessInputId(next_id):
batch.append((next_id, element))
if len(batch) == self._batch_size:
futures.append(
self._threadpool.submit(LockedRunBatch, batch, batch_id))
batch_id += 1
batch = []
# Last batch.
if batch:
futures.append(self._threadpool.submit(LockedRunBatch, batch, batch_id))
# Wait for completion.
for f in futures:
f.result()
def _PredictContinuously(self):
"""Waits for new checkpoints and runs predictor continuously."""
prev_step = -1000000
while True:
# TODO(jonathanasdf): how to determine when training finished?
path = tf.train.latest_checkpoint(self._checkpoint)
step_str = re.search(r'ckpt-(\d{8})', six.ensure_str(path)).group(1)
step = int(step_str)
if step - prev_step >= self._prediction_step_interval:
if not self._output_dir:
raise ValueError(
'output_dir must be specified for _PredictContinuously.')
output_dir = os.path.join(self._output_dir, 'step_' + step_str)
tf.io.gfile.makedirs(output_dir)
self._PredictOneCheckpoint(path, output_dir)
prev_step = step
tf.logging.info('Waiting for next checkpoint...')
time.sleep(_RETRY_SLEEP_SECONDS)
@py_utils.RetryOnTransientTfError()
def Run(self):
"""Monitor checkpoints and runs predictor."""
if self._output_dir:
tf.io.gfile.makedirs(self._output_dir)
if tf.io.gfile.isdir(self._checkpoint):
self._PredictContinuously()
else:
self._PredictOneCheckpoint(self._checkpoint, self._output_dir)
|
|
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from django.db.backends import util
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured, "Error loading MySQLdb module: %s" % e
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
raise ImportError, "MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE
import types
import re
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 supports the Python boolean type, and only uses the datetime
# module for time-related columns; older versions could have used mx.DateTime
# or strings if the datetime module was unavailable. However, MySQLdb still
# returns TIME columns as timedelta -- they behave more like timedelta, being
# signed and including days -- and Django expects time, so we still need to
# override that.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the lists of
# versions at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warnings module, and it is set up when the connection is created, so the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# Import copy of _thread_local.py from Python 2.4
from django.utils._threading_local import local
class DatabaseWrapper(local):
def __init__(self, **kwargs):
self.connection = None
self.queries = []
self.server_version = None
self.options = kwargs
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.connection.close()
self.connection = None
return False
def cursor(self):
from django.conf import settings
from warnings import filterwarnings
if not self._valid_connection():
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': False,
}
if settings.DATABASE_USER:
kwargs['user'] = settings.DATABASE_USER
if settings.DATABASE_NAME:
kwargs['db'] = settings.DATABASE_NAME
if settings.DATABASE_PASSWORD:
kwargs['passwd'] = settings.DATABASE_PASSWORD
if settings.DATABASE_HOST.startswith('/'):
kwargs['unix_socket'] = settings.DATABASE_HOST
elif settings.DATABASE_HOST:
kwargs['host'] = settings.DATABASE_HOST
if settings.DATABASE_PORT:
kwargs['port'] = int(settings.DATABASE_PORT)
kwargs.update(self.options)
self.connection = Database.connect(**kwargs)
cursor = self.connection.cursor()
else:
cursor = self.connection.cursor()
if settings.DEBUG:
filterwarnings("error", category=Database.Warning)
return util.CursorDebugWrapper(cursor, self)
return cursor
def _commit(self):
if self.connection is not None:
self.connection.commit()
def _rollback(self):
if self.connection is not None:
try:
self.connection.rollback()
except Database.NotSupportedError:
pass
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def get_server_version(self):
if not self.server_version:
if not self._valid_connection():
self.cursor()
m = server_version_re.match(self.connection.get_server_info())
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
supports_constraints = True
def quote_name(name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
dictfetchone = util.dictfetchone
dictfetchmany = util.dictfetchmany
dictfetchall = util.dictfetchall
def get_last_insert_id(cursor, table_name, pk_name):
return cursor.lastrowid
def get_date_extract_sql(lookup_type, table_name):
# lookup_type is 'year', 'month', 'day'
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), table_name)
def get_date_trunc_sql(lookup_type, field_name):
# lookup_type is 'year', 'month', 'day'
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
        format_str = ''.join(format[:i] + format_def[i:])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
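# For example, get_date_trunc_sql('month', 'created') returns
# "CAST(DATE_FORMAT(created, '%Y-%m-01 00:00:00') AS DATETIME)", i.e. the
# value truncated to the first instant of its month.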
def get_limit_offset_sql(limit, offset=None):
sql = "LIMIT "
if offset and offset != 0:
sql += "%s," % offset
return sql + str(limit)
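# For example, get_limit_offset_sql(10, 20) returns "LIMIT 20,10" -- MySQL's
# offset-first syntax for rows 21 through 30.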
def get_random_function_sql():
return "RAND()"
def get_deferrable_sql():
return ""
def get_fulltext_search_sql(field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def get_drop_foreignkey_sql():
return "DROP FOREIGN KEY"
def get_pk_default_value():
return "DEFAULT"
def get_sql_flush(style, tables, sequences):
"""Return a list of SQL statements required to remove all data from
all tables in the database (without actually removing the tables
themselves) and put the database in an empty 'initial' state
"""
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;'] + \
['%s %s;' % \
(style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(quote_name(table))
) for table in tables] + \
['SET FOREIGN_KEY_CHECKS = 1;']
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
def get_sql_sequence_reset(style, model_list):
"Returns a list of the SQL statements to reset sequences for the given models."
# No sequence reset required
return []
OPERATOR_MAPPING = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
|
|
# -*- coding: utf-8 -*-
"""
Base model container.
Provides a base model container, used by the Pyramid traversal
machinery and a mixin to aid with traversal from an instance
back up the tree.
"""
__all__ = [
"BaseModelContainer",
"InstanceTraversalMixin",
]
import re
import logging
from zope.interface import implementer
from zope.interface import alsoProvides
from sqlalchemy.exc import InvalidRequestError
from pyramid.interfaces import ILocation
from pyramid.security import ALL_PERMISSIONS
from pyramid.security import Allow, Deny
from pyramid.security import Authenticated, Everyone
from pyramid_basemodel import Session
from pyramid_basemodel.interfaces import IModelContainer
from pyramid_basemodel.root import BaseRoot
valid_slug = re.compile(r"^[.\w-]{1,64}$", re.U)
logger = logging.getLogger(__name__)
def slug_validator(node, value, regexp=valid_slug):
"""
Validate slug.
Defaults to using a slug regexp.
"""
# Raise a ValueError.
if not regexp.match(value):
raise ValueError(f"{value} is not a valid slug.")
@implementer(IModelContainer)
class BaseModelContainer(BaseRoot):
"""Traversal factory that looks up model classes by property."""
property_name = "slug"
validation_exception = Exception
@property
def _validator(self):
return slug_validator
# Default container acl to be private whilst granting authenticated
# users create permission.
__acl__ = [
(Allow, "r:admin", ALL_PERMISSIONS),
(Allow, Authenticated, "view"),
(Allow, Authenticated, "create"),
(Deny, Everyone, ALL_PERMISSIONS),
]
@property
def name(self):
"""Return plurar version of a class name."""
return self.model_cls.plural_class_name
@property
def class_name(self):
"""Determine class name based on the _class_name or the __tablename__."""
return self.model_cls.class_name
@property
def plural_class_name(self):
"""Return plurar version of a class name."""
return self.model_cls.plural_class_name
@property
def class_slug(self):
"""Class slug based on either _class_slug or __tablename__."""
return self.model_cls.class_slug
def get_child(self, key):
"""Query for and return the child instance, if found."""
column = getattr(self.model_cls, self.property_name)
query = self.model_cls.query.filter(column == key)
return query.first()
def __getitem__(self, key):
"""Lookup model instance by key."""
try:
self.validator(None, key)
except self.validation_exception:
raise KeyError(key)
context = self.get_child(key)
if not context:
raise KeyError(key)
return self.locatable(context, key)
def __init__(self, request, model_cls, key=None, parent=None, **kwargs):
"""Instantiate the container."""
# Compose.
if key is None:
key = model_cls.class_slug
if parent is None:
parent = BaseRoot(request)
self.request = request
self.model_cls = model_cls
self.__name__ = key
self.__parent__ = parent
if "property_name" in kwargs:
self.property_name = kwargs.get("property_name")
if "validator" in kwargs:
self.validator = kwargs.get("validator")
else:
self.validator = self._validator
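# Example wiring (illustrative: ``User`` is a hypothetical model class with a
# ``slug`` column). Registering the container as a Pyramid root factory
# exposes instances at /users/<slug> via traversal:
#
#   def users_root_factory(request):
#       return BaseModelContainer(request, User, key="users")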
class InstanceTraversalMixin:
"""Provide a default __parent__ implementation for traversal."""
request = None
traversal_key_name = "slug"
validation_exception = Exception
@property
def _validator(self):
return slug_validator
@property
def _base_child_query(self):
return self.query
def get_container(self):
"""Reverse up the parent traversal hierarchy until reaching a container."""
target = self
while True:
parent = target.__parent__
if not parent:
return None
if IModelContainer.providedBy(parent):
return parent
target = parent
def locatable(self, context, key, provides=alsoProvides):
"""Make a context object locatable and pass on the request."""
if not hasattr(context, "__name__"):
context.__name__ = key
context._located_parent = self
context.request = self.request
if not ILocation.providedBy(context):
provides(context, ILocation)
return context
@property
def __parent__(self, container_cls=BaseModelContainer, session=Session):
"""Either return ``self.parent``, or a model container object."""
# If the context has been located, return the container.
if hasattr(self, "_located_parent"):
return self._located_parent
# Add self to the session to avoid ``DetachedInstanceError``s.
session.add(self)
# If the model has a parent, return it.
parent = getattr(self, "parent", None)
if parent:
return parent
# Otherwise instantiate a "fake" traversal container and return that.
# It's "fake" because it doesn't know about it's parent and doesn't
# have a copy of the request.
container = container_cls(None, self.__class__)
return container
def __getitem__(self, key):
"""Lookup model instance by key."""
try:
self._validator(None, key)
except self.validation_exception:
raise KeyError(key)
# Only lookup children from instances that have them.
has_children = hasattr(self, "children")
if not has_children:
raise KeyError(key)
# Only lookup if the target column exists.
column = getattr(self.__class__, self.traversal_key_name, None)
if not column:
raise KeyError(key)
try:
query = self._base_child_query
query = query.filter_by(parent=self).filter(column == key)
context = query.first()
if not context:
raise KeyError(key)
except InvalidRequestError as err:
            # If the query was invalid, the lookup fails, e.g. if the
            # instance had the requisite properties but they weren't actually
            # sqlalchemy columns.
logger.warning(err, exc_info=True)
raise KeyError(key)
# Return the context, having set the parent and flagged as locatable.
return self.locatable(context, key)
|
|
"""
Copyright (c) 2014 Dan Obermiller
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
You should have received a copy of the MIT License along with this program.
If not, see <http://opensource.org/licenses/MIT>
"""
from functools import partial
from multiprocessing import dummy
import datetime
import logging
import select
import socket
import time
import threading
now = datetime.datetime.now()
logging.basicConfig(filename=''.join(map(str,
["Logs/",
now.year,
now.month,
now.day,
".log"])),
level=logging.INFO)
class IRC_member(object):
"""Class to represnt an individual using IRC, storing (non-sensitive)
information
"""
def __init__(self, nick, **kwargs):
"""Constructor for IRC_member. Stores nickname, realname and ident
as well as info about servers and channels
"""
self.nick = nick
self.realname = nick
self.ident = nick
for key, value in kwargs.iteritems():
self.__dict__[key] = value
# This is a mapping of server name to socket being used
# {
# "some_server": socket1,
# "other_server": socket2
# }
self.servers = {}
# This is a mapping of server name to channel name
# {
# "some_server": ["#a_channel", "#another-channel"],
# "other_server": ["#lonely-channel"]
# }
self.serv_to_chan = {}
# This is a mapping of server name to information if it differs
# {
# "some_server": {
# nick: "Mynick",
# realname: "realname",
# etc
# }
# }
# All values not in this are assumed to be self.nick, etc
self.serv_to_data = {}
## Used to get the replies from all sockets
self.lock = threading.Lock()
self.replies = {}
def send_server_message(self, hostname, message):
"""Sends a message to a server"""
if hostname not in self.servers:
logging.warning("No such server {}".format(hostname))
logging.warning("Failed to send message {}".format(message))
return 1
sock = self.servers[hostname]
try:
sock.send("{} \r\n".format(message.rstrip()))
except socket.error as e:
logging.exception(e)
logging.warning("Failed to send message {}".format(message))
return 2
else:
return 0
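    ## The send_* methods return 0 on success and a small positive error
    ## code on failure, so callers can branch on the result, e.g.
    ## (illustrative):
    ##   if member.send_server_message("irc.example.net", "NICK foo") != 0:
    ##       logging.warning("message not delivered")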
def send_channel_message(self, hostname, chan_name, message):
"""Sends a message to a channel"""
if hostname not in self.servers:
logging.warning("Not connected to server {}".format(hostname))
logging.warning("Failed to send message {}".format(message))
return 1
elif chan_name not in self.serv_to_chan[hostname]:
logging.warning("Not in channel {}".format(chan_name))
logging.warning("Failed to send message {}".format(message))
return 2
else:
try:
sock = self.servers[hostname]
sock.send("PRIVMSG {} :{}\r\n".format(chan_name,
message.rstrip()))
except socket.error as e:
logging.exception(e)
logging.warning("Failed to send message {}".format(message))
return 3
else:
return 0
def send_privmsg(self, hostname, username, message):
"""Sends a private message to a user"""
if hostname not in self.servers:
logging.warning("No such server {}".format(hostname))
logging.warning("Failed to send message {}".format(message))
return 1
## TODO: Have a test to check for valid users
## Should return 2
## if username not in ____: ...
logging.warn("Needs implementing to check for valid users")
sock = self.servers[hostname]
try:
sock.send("PRIVMSG {} :{}\r\n".format(username, message.rstrip()))
except socket.error as e:
logging.exception(e)
logging.warning("Failed to send message {}".format(message))
return 3
else:
return 0
def ping_pong(self, sock, data):
"""Pongs the server"""
try:
sock.send("PONG {}\r\n".format(data))
except socket.error as e:
logging.exception(e)
logging.warn("Couldn't pong the server")
return 1
else:
return 0
def join_server(self, hostname, port=6667, **kwargs):
"""Joins a server"""
if hostname in self.servers:
logging.warn("Already connected to {}".format(hostname))
return 0
        ## Checking if the data for this server is different from the defaults
        if kwargs:
            self.serv_to_data[hostname] = {}
            for key, value in kwargs.items():
                if key in self.__dict__:
                    self.serv_to_data[hostname][key] = value
                else:
                    logging.info("key-value pair {}: {} unused".format(key, value))
            if not self.serv_to_data[hostname]:
                del self.serv_to_data[hostname]
        ## Assigning into locals() has no effect in CPython, so read any
        ## per-server overrides back out explicitly.
        overrides = self.serv_to_data.get(hostname, {})
        nick = overrides.get('nick', self.nick)
        ident = overrides.get('ident', self.ident)
        realname = overrides.get('realname', self.realname)
try:
ip = socket.gethostbyname(hostname) ## throws gaierror 11004
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
self.servers[hostname] = sock
self.serv_to_chan[hostname] = []
sock.settimeout(2)
self.send_server_message(hostname, "NICK {}\r\n".format(nick))
            self.send_server_message(
                hostname,
                "USER {} {} bla :{}\r\n".format(nick, ident, realname))
except socket.gaierror as e: ## couldn't resolve hostname
logging.exception(e)
return 1
except socket.error as e:
logging.exception(e)
if port != 6667:
logging.warning("Consider using port 6667 (the defacto IRC port)")
return 2
else:
logging.info("Connected to {} on {}".format(hostname, port))
return 0
def leave_server(self, hostname):
"""Leaves a server"""
if hostname not in self.servers:
logging.warning("Not connected to {}".format(hostname))
return 0
try:
self.send_server_message(hostname, "QUIT\r\n")
self.servers[hostname].close()
except socket.error as e:
logging.exception(e)
logging.warning("Failed to leave server {}".format(hostname))
return 1
else:
try:
del self.servers[hostname]
finally:
try:
del self.serv_to_chan[hostname]
finally:
try:
if self.serv_to_data[hostname]:
del self.serv_to_data[hostname]
finally:
logging.info("Left server {}".format(hostname))
return 0
def join_channel(self, hostname, chan_name):
"""Joins a channel"""
if chan_name in self.serv_to_chan[hostname]:
logging.warning("Already connected to {} on {}".format(hostname, chan_name))
return 0
if chan_name.startswith("#"):
try:
self.send_server_message(hostname,
"JOIN {}\r\n".format(chan_name))
except socket.error as e:
logging.exception(e)
logging.warning("Failed to connect to {}".format(chan_name))
return 1
else:
self.serv_to_chan[hostname].append(chan_name)
logging.info("Connected to {}".format(chan_name))
return 0
else:
logging.warning("Channel names should look like #{}".format(chan_name))
return 2
def leave_channel(self, hostname, chan_name):
"""Leaves a channel"""
if hostname not in self.servers:
logging.warning("No such server {}".format(hostname))
return 1
elif chan_name not in self.serv_to_chan[hostname]:
logging.warning("No such channel {}".format(chan_name))
return 0
else:
try:
self.send_server_message(hostname,
"PART {}\r\n".format(chan_name))
except socket.error as e:
logging.exception(e)
logging.warning("Failed to leave {}".format(chan_name))
return 2
else:
self.serv_to_chan[hostname].remove(chan_name)
logging.info("Left channel {}".format(chan_name))
return 0
def receive_all_messages(self, buff_size=4096):
"""Checks all servers connected to for any messages, then displays any
that may be waiting"""
ready, _, _ = select.select(self.servers.values(), [], [], 5)
if ready:
for i in range(len(ready)):
for host, sock in self.servers.iteritems():
if sock == ready[i]:
ready[i] = host
try:
                pool = dummy.Pool()
                # Map over each ready host so that every server with pending
                # data is drained, not just the first one.
                pool.map(partial(self.receive_message, bsize=buff_size),
                         ready)
with self.lock:
replies, self.replies = self.replies, {}
for server, reply in replies.iteritems():
print "{} :\n\n".format(server)
for message in reply:
print " {}".format(message)
return 0
except socket.error as e:
logging.exception(e)
logging.warning("Failed to get messages")
return 1
else:
return 0
def receive_message(self, hostname, bsize=4096):
"""Recieves messages from a single server. Has already checked that
there is a message waiting
"""
hostname = hostname[0]
reply = []
sock = self.servers[hostname]
while True:
try:
readbuffer = sock.recv(bsize)
if not readbuffer: break
temp = readbuffer.split("\n")
readbuffer = temp.pop()
for line in temp:
line = line.rstrip().split()
if (line[0] == "PING"):
self.ping_pong(sock, line[1])
else:
line = " ".join(line)
reply.append(line)
except socket.error: break
with self.lock:
try:
if reply not in self.replies[hostname]:
self.replies[hostname] += reply
except KeyError:
self.replies[hostname] = reply
def __del__(self):
for host, sock in self.servers.items():
self.leave_server(host)
if __name__ == "__main__":
NICK = "Dannnno" # raw_input("Please enter your nickname ")
#USER = raw_input("Please enter your user name ")
#REAL = raw_input("Please enter your 'real' name ")
HOST = "irc.foonetic.net" # raw_input("Please enter your desired server ")
CHAN = "#tchannel" # raw_input("Please enter your desired channel ")
me = IRC_member(NICK)
me.join_server(HOST, nick='test', ident='test', realname='test')
time.sleep(1)
me.receive_all_messages()
me.join_channel(HOST, CHAN)
time.sleep(1)
me.receive_all_messages()
i = 0
while i < 100:
start = time.time()
msg = raw_input("Would you like to say something? ")
if msg == 'n': break
if msg.rstrip():
me.send_channel_message(HOST, CHAN, msg)
me.receive_all_messages()
end = time.time()
if (end-start) < 5:
time.sleep(int(5-(end-start)))
i += 1
del me
|
|
# file django/forms/fields.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Custom generic form fields for use with Django forms.
See also :class:`~eulcore.django.forms.captchafield.ReCaptchaField`
----
'''
import re
from django.core.validators import RegexValidator
from django.forms import CharField, ChoiceField
from django.forms.widgets import Select, TextInput, Widget
from django.utils.safestring import mark_safe
# regular expression to validate and parse W3C dates
W3C_DATE_RE = re.compile(r'^(?P<year>\d{4})(?:-(?P<month>[0-1]\d)(?:-(?P<day>[0-3]\d))?)?$')
validate_w3c_date = RegexValidator(W3C_DATE_RE,
u'Enter a valid W3C date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
'invalid')
class W3CDateWidget(Widget):
'''Multi-part date widget that generates three text input boxes for year,
month, and day. Expects and generates dates in any of these W3C formats,
depending on which fields are filled in: YYYY-MM-DD, YYYY-MM, or YYYY.
'''
# based in part on SelectDateWidget from django.forms.extras.widgets
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def value_from_datadict(self, data, files, name):
'''Generate a single value from multi-part form data. Constructs a W3C
date based on values that are set, leaving out day and month if they are
not present.
:param data: dictionary of data submitted by the form
:param files: - unused
:param name: base name of the form field
:returns: string value
'''
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == 'YYYY':
y = ''
if m == 'MM':
m = ''
if d == 'DD':
d = ''
date = y
if m:
date += '-%s' % m
if d:
date += '-%s' % d
return date
def render(self, name, value, attrs=None):
'''Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
'''
# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
year, month, day = 'YYYY', 'MM', 'DD'
if value:
# use the regular expression to pull out year, month, and day values
# if regular expression does not match, inputs will be empty
match = W3C_DATE_RE.match(value)
if match:
date_parts = match.groupdict()
year = date_parts['year']
month = date_parts['month']
day = date_parts['day']
year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
# display widget fields in YYYY-MM-DD order to match W3C date format,
# and putting required field(s) on the left
output = [year_html, month_html, day_html]
return mark_safe(u' / \n'.join(output))
def create_textinput(self, name, field, value, **extra_attrs):
'''Generate and render a :class:`django.forms.widgets.TextInput` for
a single year, month, or day input.
If size is specified in the extra attributes, it will also be used to
set the maximum length of the field.
:param name: base name of the input field
:param field: pattern for this field (used with name to generate input name)
:param value: initial value for the field
:param extra_attrs: any extra widget attributes
:returns: rendered HTML output for the text input
'''
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
# use size to set maximum length
if 'size' in extra_attrs:
extra_attrs['maxlength'] = extra_attrs['size']
local_attrs = self.build_attrs(id=field % id_, **extra_attrs)
input = TextInput()
return input.render(field % name, value, local_attrs)
class W3CDateField(CharField):
'''W3C date field that uses a :class:`~eulcore.django.forms.fields.W3CDateWidget`
for presentation and uses a simple regular expression to do basic validation
on the input (but does not actually test that it is a valid date).
'''
widget = W3CDateWidget
default_error_messages = {
'invalid': u'Enter a date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
}
default_validators = [validate_w3c_date]
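# A minimal usage sketch (hypothetical form): the field accepts '2010',
# '2010-06' or '2010-06-15' and rejects other formats at validation time.
#
#   from django import forms
#
#   class RecordSearchForm(forms.Form):
#       date = W3CDateField(required=False)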
class DynamicSelect(Select):
'''A :class:`~django.forms.widgets.Select` widget whose choices are not
static, but instead generated dynamically when referenced.
:param choices: callable; this will be called to generate choices each
time they are referenced.
'''
def __init__(self, attrs=None, choices=None):
# Skip right over Select and go to its parents. Select just sets
# self.choices, which will break since it's a property here.
super(Select, self).__init__(attrs)
if choices is None:
choices = lambda: ()
self._choices = choices
def _get_choices(self):
return self._choices()
def _set_choices(self, choices):
self._choices = choices
choices = property(_get_choices, _set_choices)
class DynamicChoiceField(ChoiceField):
'''A :class:`django.forms.ChoiceField` whose choices are not static, but
instead generated dynamically when referenced.
:param choices: callable; this will be called to generate choices each
time they are referenced
'''
widget = DynamicSelect
def __init__(self, choices=None, widget=None, *args, **kwargs):
# ChoiceField.__init__ tries to set static choices, which won't
# work since our choices are dynamic, so we're going to have to skip
# over it.
# First normalize our choices
if choices is None:
choices = lambda: ()
self._choices = choices
# Then normalize our widget, constructing it with our choices
# function if we need to construct it.
if widget is None:
widget = self.widget
if isinstance(widget, type):
widget = widget(choices=self._choices)
        # Now call super.__init__(), but bypass ChoiceField.
# ChoiceField just sets static choices manually and then calls its
# own super. We don't have static choices, so ChoiceField.__init__()
# would break if we called it. Skip over ChoiceField and go straight
# to *its* super.__init__().
super(ChoiceField, self).__init__(widget=widget, *args, **kwargs)
def _get_choices(self):
return self._choices()
def _set_choices(self, choices):
# if choices is updated, update the widget choice callable also
self._choices = choices
self.widget._choices = self._choices
choices = property(_get_choices, _set_choices)
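# A usage sketch (hypothetical ``Term`` model): because ``choices`` is a
# callable evaluated on each access, options added after server start-up
# still appear in the rendered form.
#
#   def term_choices():
#       return [(t.slug, t.label) for t in Term.objects.all()]
#
#   class SearchForm(forms.Form):
#       term = DynamicChoiceField(choices=term_choices)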
|
|
# -*- coding: utf-8 -*-
import wabbit_wappa as _wabbit_wappa
from math import exp
import copy
from collections.abc import Mapping
class Namespace(_wabbit_wappa.Namespace):
"""
    A subclass of the wabbit_wappa Namespace that allows one to set
    features from a dictionary.
"""
def add_features(self, features):
"""
Overridden add_features allows setting features from a dictionary/mapping
"""
if isinstance(features, Mapping):
features = features.items()
super(Namespace, self).add_features(features)
class Example(object):
"""
Wraps an example for Vowpal Wabbit.
"""
def __init__(self,
label=None,
importance=None,
base=None,
tag=None,
features=None,
namespaces=None):
self.label = label
self.importance = importance
self.base = base
self.tag = tag
self.namespaces = {}
if namespaces:
self.add_namespaces(namespaces)
if features:
self.add_namespace(Namespace(features=features))
def __getitem__(self, name):
return self.namespace(name)
def namespace(self, name=None):
        if name not in self.namespaces:
self.add_namespace(Namespace(name=name))
return self.namespaces[name]
def add_feature(self, feature, value=None, namespace=None):
self.namespace(namespace).add_feature(feature, value)
def add_features(self, features, namespace=None):
self.namespace(namespace).add_features(features)
def add_namespaces(self, namespaces):
for i in namespaces:
self.add_namespace(i)
def add_namespace(self, namespace):
self.namespaces[namespace.name] = namespace
return namespace
def make_line(self, label=None, importance=None, base=None, tag=None):
"""Makes and returns an example string in VW syntax.
If given, 'response', 'importance', 'base', and 'tag' are used
to label the example. Features for the example come from
any given features or namespaces, as well as any previously
added namespaces (using them up in the process).
"""
response = self.label if label is None else label
importance = self.importance if importance is None else importance
tag = self.tag if tag is None else tag
base = self.base if base is None else base
substrings = []
tokens = []
if response is not None:
token = str(response)
tokens.append(token)
if importance is not None: # Check only if response is given
token = str(importance)
tokens.append(token)
if base is not None: # Check only if importance is given
token = str(base)
tokens.append(token)
if tag is not None:
token = "'" + str(tag) # Tags are unambiguous if given a ' prefix
tokens.append(token)
else:
token = "" # Spacing element to avoid ambiguity in parsing
tokens.append(token)
substring = ' '.join(tokens)
substrings.append(substring)
if self.namespaces:
for namespace in self.namespaces.values():
substring = namespace.to_string()
substrings.append(substring)
else:
substrings.append('') # For correct syntax
line = '|'.join(substrings)
return line
def __str__(self):
return "<Example: '{}'>".format(self.make_line())
class BinaryLogisticPredictionResult(object):
def __init__(self, result):
self.value = result.prediction
if hasattr(result, 'importance'):
self.importance = result.importance
@property
def label(self):
return (-1, 1)[self.value >= 0]
@property
def logistic(self):
"""
Returns the 0..1 probability of label being 1
"""
return 1.0 / (1.0 + exp(-self.value))
@property
def logistic_11(self):
"""
Returns the value of -1..1 logistic function at
the resulting value.
"""
        return 2.0 / (1.0 + exp(-self.value)) - 1.0
class Rabbit(object):
_result_factory = BinaryLogisticPredictionResult
def __init__(self, **kwargs):
self.options = kwargs
def start(self):
self.vw = _wabbit_wappa.VW(**self.options)
def send_line(self, line, parse_response=False):
return self.vw.send_line(line, parse_response)
def make_line(self, *, example=None, label=None, importance=None, base=None,
tag=None, features=None, namespaces=None, no_label=False):
example = copy.copy(example) or Example()
if namespaces:
example.add_namespaces(namespaces)
if features:
example.add_features(features)
if no_label:
example.label = None
return example.make_line(label=label, importance=importance, base=base, tag=tag)
def teach(self, *, example=None, label=None, importance=None, base=None, tag=None,
features=None, namespaces=None):
        line = self.make_line(example=example, label=label, importance=importance,
                              base=base, tag=tag, features=features,
                              namespaces=namespaces)
self.send_line(line, parse_response=True)
def _get_prediction_for_line(self, line):
return self._result_factory(self.send_line(line, True))
def predict(self, *, example=None, base=None, tag=None,
features=None, namespaces=None):
line = self.make_line(example=example, no_label=True,
tag=tag, features=features, namespaces=namespaces)
return self._get_prediction_for_line(line)
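# A minimal usage sketch (hypothetical options; wabbit_wappa drives a `vw`
# subprocess, so the binary must be on PATH):
#
#   rabbit = Rabbit(loss_function='logistic')
#   rabbit.start()
#   rabbit.teach(label=1, features={'height': 1.2})
#   result = rabbit.predict(features={'height': 1.1})
#   print(result.logistic)  # probability that the label is 1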
class OfflineRabbit(Rabbit):
"""
    An OfflineRabbit instance writes whatever examples it is given, in cooked
    form, to the given file.
"""
def __init__(self, fp, **kwargs):
"""
:param fp: an open file, where the examples are written as plain string lines
Other parameters as per Rabbit
"""
super(OfflineRabbit, self).__init__(**kwargs)
self.fp = fp
def start(self):
"""
No-op for OfflineRabbit
"""
pass
def predict(self):
raise Exception("Unable to predict without an actual VW instance")
def send_line(self, line, parse_response=False):
self.fp.write(line + '\n')
class ActiveRabbit(Rabbit):
def __init__(self, **kwargs):
super(ActiveRabbit, self).__init__(active_mode=True, **kwargs)
|
|
# Natural Language Toolkit: Plaintext Corpus Reader
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the XML version of the British National Corpus.
"""
__docformat__ = 'epytext en'
import re
import xml.etree.ElementTree as ET
from .api import *
from .util import *
from .xmldocs import *
class BNCCorpusReader(XMLCorpusReader):
"""
Corpus reader for the XML version of the British National Corpus.
For access to the complete XML data structure, use the ``xml()``
method. For access to simple word lists and tagged word lists, use
``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
"""
def __init__(self, root, fileids, lazy=True):
XMLCorpusReader.__init__(self, root, fileids)
self._lazy = lazy
def words(self, fileids=None, strip_space=True, stem=False):
"""
:return: the given file(s) as a list of words
and punctuation symbols.
:rtype: list(str)
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if self._lazy:
return concat([BNCWordView(fileid, False, None,
strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, False, None,
strip_space, stem)
for fileid in self.abspaths(fileids)])
def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False):
"""
:return: the given file(s) as a list of tagged
words and punctuation symbols, encoded as tuples
``(word,tag)``.
:rtype: list(tuple(str,str))
:param c5: If true, then the tags used will be the more detailed
c5 tags. Otherwise, the simplified tags will be used.
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if c5: tag = 'c5'
else: tag = 'pos'
if self._lazy:
return concat([BNCWordView(fileid, False, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, False, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
def sents(self, fileids=None, strip_space=True, stem=False):
"""
:return: the given file(s) as a list of
sentences or utterances, each encoded as a list of word
strings.
:rtype: list(list(str))
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if self._lazy:
return concat([BNCWordView(fileid, True, None, strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, True, None, strip_space, stem)
for fileid in self.abspaths(fileids)])
def tagged_sents(self, fileids=None, c5=False, strip_space=True,
stem=False):
"""
:return: the given file(s) as a list of
sentences, each encoded as a list of ``(word,tag)`` tuples.
:rtype: list(list(tuple(str,str)))
:param c5: If true, then the tags used will be the more detailed
c5 tags. Otherwise, the simplified tags will be used.
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if c5: tag = 'c5'
else: tag = 'pos'
if self._lazy:
return concat([BNCWordView(fileid, True, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, True, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
def _words(self, fileid, bracket_sent, tag, strip_space, stem):
"""
Helper used to implement the view methods -- returns a list of
words or a list of sentences, optionally tagged.
:param fileid: The name of the underlying file.
:param bracket_sent: If true, include sentence bracketing.
:param tag: The name of the tagset to use, or None for no tags.
:param strip_space: If true, strip spaces from word tokens.
:param stem: If true, then substitute stems for words.
"""
result = []
        xmldoc = ET.parse(fileid).getroot()
for xmlsent in xmldoc.findall('.//s'):
sent = []
for xmlword in _all_xmlwords_in(xmlsent):
word = xmlword.text
if not word:
word = "" # fixes issue 337?
if strip_space or stem: word = word.strip()
if stem: word = xmlword.get('hw', word)
if tag == 'c5':
word = (word, xmlword.get('c5'))
elif tag == 'pos':
word = (word, xmlword.get('pos', xmlword.get('c5')))
sent.append(word)
if bracket_sent:
result.append(BNCSentence(xmlsent.attrib['n'], sent))
else:
result.extend(sent)
assert None not in result
return result
def _all_xmlwords_in(elt, result=None):
if result is None: result = []
for child in elt:
if child.tag in ('c', 'w'): result.append(child)
else: _all_xmlwords_in(child, result)
return result
class BNCSentence(list):
"""
A list of words, augmented by an attribute ``num`` used to record
the sentence identifier (the ``n`` attribute from the XML).
"""
def __init__(self, num, items):
self.num = num
list.__init__(self, items)
class BNCWordView(XMLCorpusView):
"""
A stream backed corpus view specialized for use with the BNC corpus.
"""
def __init__(self, fileid, sent, tag, strip_space, stem):
"""
:param fileid: The name of the underlying file.
:param sent: If true, include sentence bracketing.
:param tag: The name of the tagset to use, or None for no tags.
:param strip_space: If true, strip spaces from word tokens.
:param stem: If true, then substitute stems for words.
"""
if sent: tagspec = '.*/s'
else: tagspec = '.*/s/(.*/)?(c|w)'
self._sent = sent
self._tag = tag
self._strip_space = strip_space
self._stem = stem
XMLCorpusView.__init__(self, fileid, tagspec)
# Read in a tasty header.
self._open()
self.read_block(self._stream, '.*/teiHeader$', self.handle_header)
self.close()
# Reset tag context.
self._tag_context = {0: ()}
title = None #: Title of the document.
author = None #: Author of the document.
editor = None #: Editor
resps = None #: Statement of responsibility
def handle_header(self, elt, context):
# Set up some metadata!
titles = elt.findall('titleStmt/title')
if titles: self.title = '\n'.join(
[title.text.strip() for title in titles])
authors = elt.findall('titleStmt/author')
if authors: self.author = '\n'.join(
[author.text.strip() for author in authors])
editors = elt.findall('titleStmt/editor')
if editors: self.editor = '\n'.join(
[editor.text.strip() for editor in editors])
resps = elt.findall('titleStmt/respStmt')
if resps: self.resps = '\n\n'.join(
'\n'.join(resp_elt.text.strip() for resp_elt in resp)
for resp in resps)
def handle_elt(self, elt, context):
if self._sent: return self.handle_sent(elt)
else: return self.handle_word(elt)
def handle_word(self, elt):
word = elt.text
if not word:
word = "" # fixes issue 337?
if self._strip_space or self._stem:
word = word.strip()
if self._stem:
word = elt.get('hw', word)
if self._tag == 'c5':
word = (word, elt.get('c5'))
elif self._tag == 'pos':
word = (word, elt.get('pos', elt.get('c5')))
return word
def handle_sent(self, elt):
sent = []
for child in elt:
if child.tag == 'mw':
sent += [self.handle_word(w) for w in child]
elif child.tag in ('w','c'):
sent.append(self.handle_word(child))
else:
raise ValueError('Unexpected element %s' % child.tag)
return BNCSentence(elt.attrib['n'], sent)
|
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function
from chainer import function_node
from chainer.utils import type_check
import chainerx
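# The channel axis of ``x`` packs the four gate pre-activations (a, i, f, o)
# interleaved with period 4, so x.shape[1] must be divisible by 4.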
def _extract_gates(x):
r = x.reshape((len(x), x.shape[1] // 4, 4) + x.shape[2:])
return [r[:, :, i] for i in six.moves.range(4)]
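# sigmoid(x) equals 0.5 * tanh(0.5 * x) + 0.5; computing it via tanh avoids
# overflow in exp(-x) for large negative inputs.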
def _sigmoid(x, xp=numpy):
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_grad_sigmoid(x):
return x * (1 - x) * (1 - 2 * x)
def _grad_tanh(x):
return 1 - x * x
def _grad_grad_tanh(x, gx):
return -2 * x * gx
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T af = sigmoid(f); \
T ao = sigmoid(o);
'''
class LSTM(function_node.FunctionNode):
"""Long short-term memory unit with forget gate.
It has two inputs (c, x) and two outputs (c, h), where c indicates the cell
state. x must have four times channels compared to the number of units.
"""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('c', 'x'))
c_type, x_type = in_types
type_check.expect(
c_type.dtype.kind == 'f',
x_type.dtype == c_type.dtype,
c_type.ndim >= 2,
x_type.ndim >= 2,
c_type.ndim == x_type.ndim,
x_type.shape[0] <= c_type.shape[0],
x_type.shape[1] == 4 * c_type.shape[1],
)
for i in six.moves.range(2, type_check.eval(c_type.ndim)):
type_check.expect(x_type.shape[i] == c_type.shape[i])
def forward_chainerx(self, inputs):
c, x = inputs
c_next, h = chainerx.lstm(c, x)
return c_next, h
def forward(self, inputs):
self.retain_inputs((0, 1))
c_prev, x = inputs
a, i, f, o = _extract_gates(x)
batch = len(x)
if isinstance(x, chainer.get_cpu_array_types()):
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
else:
xp = numpy
a = xp.tanh(a)
i = _sigmoid(i, xp)
f = _sigmoid(f, xp)
o = _sigmoid(o, xp)
c_next = numpy.empty_like(c_prev)
c_next[:batch] = a * i + f * c_prev[:batch]
h = o * xp.tanh(c_next[:batch])
else:
c_next = cuda.cupy.empty_like(c_prev)
h = cuda.cupy.empty_like(c_next[:batch])
cuda.elementwise(
'T c_prev, T a, T i_, T f, T o', 'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + af * c_prev;
h = ao * tanh(c);
''',
'lstm_fwd', preamble=_preamble)(
c_prev[:batch], a, i, f, o, c_next[:batch], h)
c_next[batch:] = c_prev[batch:]
self.retain_outputs((0,))
return c_next, h
def backward(self, indexes, grads):
grad_inputs = (
self.get_retained_inputs() + self.get_retained_outputs() + grads)
return LSTMGrad()(*grad_inputs)
class LSTMGrad(function.Function):
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
c_prev, x, c_next, gc, gh = inputs
batch = len(x)
gx = xp.empty_like(x)
ga, gi, gf, go = _extract_gates(gx)
# Consider the case that either gradient is not given
if gc is None:
gc_update = 0
gc_rest = 0
else:
gc_update = gc[:batch]
gc_rest = gc[batch:]
if gh is None:
gh = 0
a, i, f, o = _extract_gates(x)
if xp is numpy:
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
tanh_a = xp.tanh(a)
sig_i = _sigmoid(i, xp)
sig_f = _sigmoid(f, xp)
sig_o = _sigmoid(o, xp)
co = xp.tanh(c_next[:batch])
gc_prev = numpy.empty_like(c_prev)
# multiply f later
gc_prev[:batch] = gh * sig_o * _grad_tanh(co) + gc_update
gc = gc_prev[:batch]
ga[:] = gc * sig_i * _grad_tanh(tanh_a)
gi[:] = gc * tanh_a * _grad_sigmoid(sig_i)
gf[:] = gc * c_prev[:batch] * _grad_sigmoid(sig_f)
go[:] = gh * co * _grad_sigmoid(sig_o)
gc_prev[:batch] *= sig_f # multiply f here
gc_prev[batch:] = gc_rest
else:
gc_prev = xp.empty_like(c_prev)
cuda.elementwise(
'T c_prev, T c, T gc, T gh, T a, T i_, T f, T o',
'T gc_prev, T ga, T gi, T gf, T go',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
gf = temp * c_prev * grad_sigmoid(af);
go = gh * co * grad_sigmoid(ao);
gc_prev = temp * af;
''',
'lstm_bwd', preamble=_preamble)(
c_prev[:batch], c_next[:batch], gc_update, gh, a, i, f, o,
gc_prev[:batch], ga, gi, gf, go)
gc_prev[batch:] = gc_rest
return gc_prev, gx
def backward(self, inputs, grads):
xp = backend.get_array_module(*inputs)
c_prev, x, c, gc, gh = inputs
ggc_prev, ggx = grads
batch = len(x)
gc_is_none = gc is None
gh_is_none = gh is None
ggc_prev_is_none = ggc_prev is None
ggx_is_none = ggx is None
if gc_is_none:
gc = 0
if gh_is_none:
gh = 0
if ggc_prev_is_none:
ggc_prev = 0
if ggx_is_none:
ggx = 0
gc_prev = xp.empty_like(c_prev)
gx = xp.empty_like(x)
gc_next = xp.empty_like(c)
ggc = xp.empty_like(c_prev)
ggh = xp.empty_like(c[:batch])
gc_prev[batch:] = 0
gc_next[batch:] = 0
ggc[batch:] = 0 if ggc_prev_is_none else ggc_prev[batch:]
ggh[batch:] = 0
c_prev = c_prev[:batch]
c = c[:batch]
if not gc_is_none:
gc = gc[:batch]
if not ggc_prev_is_none:
ggc_prev = ggc_prev[:batch]
if not ggx_is_none:
ggx = ggx[:batch]
a, i, f, o = _extract_gates(x)
if not ggx_is_none:
gga, ggi, ggf, ggo = _extract_gates(ggx)
else:
gga = 0
ggi = 0
ggf = 0
ggo = 0
ga, gi, gf, go = _extract_gates(gx)
lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo,
gc_prev[:batch], ga[:], gi[:], gf[:], go[:], gc_next[:batch],
ggc[:batch], ggh[:batch])
if gc_is_none:
ggc = None
if gh_is_none:
ggh = None
return gc_prev, gx, gc_next, ggc, ggh
@cuda.fuse()
def lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo,
gc_prev, ga, gi, gf, go, gc_next, ggc, ggh):
xp = backend.get_array_module(a)
sig_o = _sigmoid(o, xp)
gsig_o = _grad_sigmoid(sig_o)
ggsig_o = _grad_grad_sigmoid(sig_o)
sig_i = _sigmoid(i, xp)
gsig_i = _grad_sigmoid(sig_i)
ggsig_i = _grad_grad_sigmoid(sig_i)
sig_f = _sigmoid(f, xp)
gsig_f = _grad_sigmoid(sig_f)
ggsig_f = _grad_grad_sigmoid(sig_f)
tanh_a = xp.tanh(a)
gtanh_a = _grad_tanh(tanh_a)
ggtanh_a = _grad_grad_tanh(tanh_a, gtanh_a)
tanh_c = xp.tanh(c)
gtanh_c = _grad_tanh(tanh_c)
ggtanh_c = _grad_grad_tanh(tanh_c, gtanh_c)
gc_bar = gh * sig_o * gtanh_c + gc
gc_prev[:] = ggf * gc_bar * gsig_f
ga[:] = (gga * sig_i * ggtanh_a + ggi * gtanh_a * gsig_i) * gc_bar
gi[:] = (gga * gtanh_a * gsig_i + ggi * tanh_a * ggsig_i) * gc_bar
gf[:] = (ggc_prev * (gh * sig_o * gtanh_c + gc) * gsig_f +
ggf * gc_bar * c_prev * ggsig_f)
ggc[:] = (ggc_prev * sig_f +
gga * sig_i * gtanh_a +
ggi * tanh_a * gsig_i +
ggf * c_prev * gsig_f)
dgc_do = gh * gsig_o * gtanh_c
go[:] = ggc * dgc_do + ggo * gh * tanh_c * ggsig_o
dgc_dc = gh * sig_o * ggtanh_c
gc_next[:] = ggc * dgc_dc + ggo * gh * gtanh_c * gsig_o
ggh[:] = ggc * sig_o * gtanh_c + ggo * tanh_c * gsig_o
return gc_prev, ga, gi, gf, go, gc_next, ggc, ggh
def lstm(c_prev, x):
"""Long Short-Term Memory units as an activation function.
    This function implements LSTM units with forget gates. Let ``c_prev`` be
    the previous cell state and ``x`` the input array.
    First, the input array ``x`` is split into four arrays
    :math:`a, i, f, o` of the same shapes along the second axis. This means
    that the second axis of ``x`` must be four times the size of the second
    axis of ``c_prev``.
    The split input arrays correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as:
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
one. When mini-batch size of ``x`` is smaller than that of ``c``, this
function only updates ``c[0:len(x)]`` and doesn't change the rest of ``c``,
``c[len(x):]``.
So, please sort input sequences in descending order of lengths before
applying the function.
Args:
c_prev (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks
<http://www.felixgers.de/papers/phd.pdf>`_.
.. seealso::
:class:`~chainer.links.LSTM`
.. admonition:: Example
Assuming ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is:
>>> n_units = 100
>>> y = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> h = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> c = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> model = chainer.Chain()
>>> with model.init_scope():
... model.w = L.Linear(n_units, 4 * n_units)
... model.v = L.Linear(n_units, 4 * n_units)
>>> x = model.w(y) + model.v(h)
>>> c, h = F.lstm(c, x)
        This corresponds to calculating the input array ``x``, or the input
        sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
        the previous outgoing signal ``h``. Different parameters are used for
        different kinds of input sources.
.. note::
We use the naming rule below.
- incoming signal
The formal input of the formulation of LSTM (e.g. in NLP, word
vector or output of lower RNN layer). The input of
:class:`chainer.links.LSTM` is the *incoming signal*.
- input array
            The array which is linearly transformed from the *incoming signal* and
the previous outgoing signal. The *input array* contains four
sources, the sources of cell input, input gate, forget gate and
output gate. The input of
:class:`chainer.functions.activation.lstm.LSTM` is the
*input array*.
"""
return LSTM().apply((c_prev, x))
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
orm.UserWarning.objects.all().delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_categories'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'last_activity_check': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'bantime': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
|
|
import os
from os.path import dirname, join
from collections import OrderedDict
import pandas as pd
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column
from bokeh.models import Select, Div, Column, HoverTool, ColumnDataSource, Button, CheckboxButtonGroup
from bokeh.plotting import figure
from bokeh.sampledata.periodic_table import elements
df_obs = pd.read_csv('./crossfilter_app/Data/DataC.csv')
# single reference standard; this could also be supplied on a per-request basis
#df_ref = pd.read_json('./Data/Ref.json')
# divide the data columns into general discrete and continuous sets
columns = sorted(df_obs.columns) #+ sorted(df.columns)
discrete = [x for x in columns if df_obs[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
####################################################################
## divide data into plottables and non-plottables (aggregates or 3D plottables);
## keep it to 2D plottables for now, since these are known from the column names themselves
plottables = ['k-point', 'value', 'perc_precisions']
x_select = Select(title='X-Axis', value='k-point', options=plottables)
y_select = Select(title='Y-Axis', value='value', options=plottables)
non_plottables = [ x for x in columns if x not in plottables ] # for aggregates
structures = list(np.unique(df_obs['structure']))
_elements = list(np.unique(df_obs['element']))
#print (_elements)
exchanges = list(np.unique(df_obs['exchange']))
properties = list(np.unique(df_obs['property']))
codes = list(np.unique(df_obs['code']))
# which sets of k-point and value to string together? any unit transformations on the dataset values or k-points?
## keep another dataframe (mongo collection) of reference standards to compute the accuracy
## (uniquely identified by the element; the SAME standard should apply to all codes/exchanges/elements)
############## Header Content from description.html #################
content_filename = join(dirname(__file__), "test_desc.html")
description = Div(text=open(content_filename).read(),
render_as_text=False, width=600)
# periodic table widget
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
print("Table---")
#print(elements.period)
print("---Table")
try:
elements["period"] = [romans[x-1] for x in elements.period]
except Exception:
    # elements.period may already hold roman numerals (the sampledata frame is shared
    # across re-runs of this script in the same process)
    pass
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
colormap = {
"c" : "#ffa07a",
"nc" : "#A9A9A9"
}
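# 'c' (salmon) appears to mark elements highlighted as having data; 'nc' (grey) is the default.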
elems_colorpair = {'H':'nc','He':'nc',
'Li':'nc','Be':'nc','B':'nc','C':'nc', 'N':'nc', 'O':'nc','F':'nc','Ne':'nc',
'Na':'nc','Mg':'nc', 'Al':'c','Si':'nc','P':'nc','S':'nc','Cl':'nc','Ar':'nc',
'K': 'nc', 'Ca':'nc','Sc':'c', 'Ti':'c' ,'V':'c' , 'Cr':'c', 'Mn':'c', 'Fe':'c', 'Co':'c', 'Ni':'c', 'Cu':'c', 'Zn':'c',
'Rb':'nc', 'Sr':'nc','Y':'c', 'Zr':'c', 'Nb':'c', 'Mo':'c', 'Tc':'c', 'Ru':'c', 'Rh':'c', 'Pd':'c', 'Ag':'c','Cd': 'c',
'Cs':'nc', 'Ba':'nc', 'Hf':'c', 'Ta':'c', 'W':'c', 'Re':'c', 'Os':'c', 'Ir':'c', 'Pt':'c', 'Au':'c', 'Hg':'c'
}
elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )
print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
# cpk=elements["CPK"],
atomic_number=elements["atomic number"],
# electronic=elements["electronic configuration"],
# mass=elements["atomic mass"],
B=['B' for x in elements["atomic mass"]],
dB=['dB' for x in elements["atomic mass"]],
V0=['V0' for x in elements["atomic mass"]],
E0=['E0' for x in elements["atomic mass"]],
# type=elements["metal"],
type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],
)
)
# plot the periodic layout
name = source.data["name"]
B = source.data["B"]
ptable = figure(title="Periodic Table", tools="hover",
x_range=group_range, y_range=list(reversed(romans)))
ptable.plot_width = 1500
ptable.toolbar_location = None
ptable.outline_line_color = None
ptable.background_fill_color = 'white'
ptable.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.3, color='type_color')
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
ptable.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="22pt", **text_props)
ptable.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
ptable.grid.grid_line_color = None
ptable.select_one(HoverTool).tooltips = [
("name", "@name"),
("V0 (A^3 per atom)", "@V0"),
("B (GPa)", "@B"),
("dB/dP", "@dB")
]
######### CREATES CROSSFILTER ##########################
# decide if all columns or crossfilter down to sub properties
#source_data = pd.DataFrame({})#ColumnDataSource(data=dict())
class CrossFiltDFs():
def __init__(self,struct_df=None,elem_df=None,prop_df=None,\
plot_data=None, code_df=None, exchange_df=None):
self.struct_df = struct_df
self.elem_df = elem_df
self.prop_df = prop_df
self.code_df = code_df
self.exchange_df = exchange_df
self.plot_data = plot_data
def crossfilter_by_tag(self,df, tag):
"""
        a crossfilter that can recursively update the unique options
        in the UI based on prior selections.
        Returns the dataframe crossfiltered by the given tag, e.g. {'element': 'Ag'}
"""
        col, spec = list(tag.items())[0]
#col, spec = tag.items()
return df[df[col]==spec]
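    # Illustrative (hypothetical) usage: successive calls narrow the frame one tag at a
    # time, e.g. crossfilter_by_tag(df_obs, {'code': codes[0]}) and then
    # crossfilter_by_tag(result, {'element': 'Al'}); the values shown are examples only.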
def update_ptable(self):
"""
update the periodic table highlighted elements
"""
from bokeh.sampledata.periodic_table import elements
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
elements["period"] = [x for x in elements.period]
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
print ('reaches colormap def')
colormap = {
"c" : "#ffa07a",
"nc" : "#A9A9A9"
}
elems_colorpair = {}
B_extrapol_props = {}
dB_extrapol_props = {}
V0_extrapol_props = {}
E0_extrapol_props = {}
for e in elements["symbol"]:
for p in np.unique(list(self.struct_df['property'])):
if e in np.unique(list(self.struct_df['element'])):
#print (p,e,'avail')
e_struct = self.struct_df[self.struct_df['element']==e]
p_e_struct = e_struct[e_struct['property']==p]
elem_prop = {e: np.unique(list(p_e_struct['extrapolate']))[0]}
else:
elem_prop = {e:'xxx'}
if p=='B':
B_extrapol_props.update(elem_prop)
elif p=='dB':
dB_extrapol_props.update(elem_prop)
elif p=='v0':
print ('V0', elem_prop)
V0_extrapol_props.update(elem_prop)
elif p =='E0':
E0_extrapol_props.update(elem_prop)
elems_colorpair.update( { key:'c' for key in np.unique(list(self.struct_df['element'])) } )
elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )
print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
# cpk=elements["CPK"],
atomic_number=elements["atomic number"],
# electronic=elements["electronic configuration"],
B=[B_extrapol_props[x] for x in elements["symbol"]],
dB=[dB_extrapol_props[x] for x in elements["symbol"]],
V0=[V0_extrapol_props[x] for x in elements["symbol"]],
E0=[E0_extrapol_props[x] for x in elements["symbol"]],
type=elements["metal"],
type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],
)
)
# plot the periodic layout
#name = source.data["name"]
#B = source.data["B"]
ptable = figure(title="Periodic Table", tools="hover",
x_range=group_range, y_range=list(reversed(romans)))
ptable.background_fill_color='white'
ptable.plot_width = 1500
ptable.toolbar_location = None
ptable.outline_line_color = None
ptable.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.3, color='type_color')
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
ptable.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="22pt", **text_props)
ptable.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
# ptable.text(x="symx", y="namey", text="name",
# text_font_size="6pt", **text_props)
# ptable.text(x="symx", y="massy", text="mass",
# text_font_size="5pt", **text_props)
ptable.grid.grid_line_color = None
ptable.select_one(HoverTool).tooltips = [
("name", "@name"),
("V0 (A^3 per atom)", "@V0"),
("B (GPa)", "@B"),
("dB/dP", "@dB")]
return ptable
def create_figure(self,dataset,datplot='Init',plot_type=None):
"""
figure and plot creation for a given dataset
TODO: enable support for multiple selection
refactor to a simple figure creator and
add helper functions for the plots
"""
kw = dict()
x_title = x_select.value.title() + ' Density per atom'
# hack for labels now
if isinstance(dataset,pd.DataFrame):
if np.unique(list(dataset['property']))[0]=='B':
y_title = 'Bulk Modulus (GPa) '+y_select.value.title()
elif np.unique(list(dataset['property']))[0]=='dB':
y_title = 'dB/dP '+y_select.value.title()
elif np.unique(list(dataset['property']))[0]=='v0':
y_title = 'Volume per atom (A^3) '+y_select.value.title()
elif np.unique(list(dataset['property']))[0]=='E0':
y_title = 'DFT Energy per atom (eV/atom) '+y_select.value.title()
else:
y_title = 'Pade Prediction'
kw['title'] = "%s vs %s" % (y_title, x_title)
#if x_select.value=='k-point':
kw['x_axis_type'] = 'log'
if x_select.value == 'perc_precisions' and y_select.value == 'perc_precisions':
kw['y_axis_type'] = 'log'
self.p = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
# sets the axes
self.p.xaxis.axis_label = x_title
self.p.yaxis.axis_label = y_title
if x_select.value in continuous:
            self.p.xaxis.major_label_orientation = np.pi / 4
#print (dataset)
if datplot=='Init':
# if data is to be plotted
xs =dataset[x_select.value].values
ys = dataset[y_select.value].values
self.xs_init = xs
self.ys_init = ys
self.p.scatter(x=xs, y=ys)#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
return self.p
elif datplot == 'Add':
# add a plot to figure, from statistical analysis
if plot_type == 'plot_pade':
#pade_order = self.analysis_results['Order']
#pade_extrapolate = self.analysis_results['Extrapolate']
#print (pade_extrapolate, float(pade_extrapolate))
# create precisions based on the extrapolate
#print (self.add_data)
xs = self.add_data[0]
ys = self.add_data[1]#[abs(y-pade_extrapolate) for y in self.ys_init]
#print (ys)
# print (xs,ys,len(xs),len(ys))
print ("Plots a line supposedly")
#print (len(self.ys_init), len(ys))
#l = min([len(self.ys_init), len(ys), len(self.xs_init),len(xs)])
#self.plot_layout.scatter(x=self.xs_init[0:l], y=self.ys_init[0:l])#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
#print (type(self.plot_layout))
#self.p.self.plot
                self.p = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
print('executes till re-figure')
self.p.circle(x=self.xs_init,y=self.ys_init)
print('executes till circle')
self.p.line(x=xs, y=ys, line_color='red')
#self.p.line_color='red'
print('executes till line')
return self.p
else:
# clear the figure by plotting an empty figure
xs = []
ys = []
self.p = figure(plot_height=600, plot_width=800, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
self.p.scatter(x=xs, y=ys)#, alpha=1.0, hover_color='blue', hover_alpha=1.0)
return self.p
# The crossfilter widgets
def update(self, attr, old, new):
print ('Attribute', attr, 'OLD', old, 'NEW', new)
#print (len(layout.children))
print ('executes here on update')#, exchange_df)
def update_code(self):
"""
update for the code selection
"""
print ('update code')
self.code_df = df_obs[df_obs['code'] == code.value].dropna()
def update_exchange(self):
"""
update the exchange
"""
print ('update exchange')
self.exchange_df = self.code_df[self.code_df['exchange']== exchange.value].dropna()
def update_element(self,new):
print ('Updating element down selection for property',element.active[0])
self.elem_df = self.struct_df[self.struct_df['element'] == _elements[element.active[0]] ].dropna()
self.plot_data = self.elem_df
def update_struct(self):
#print ('Updating struct down selection for element')
#print ("struct.value",struct.value)
self.struct_df = self.exchange_df[self.exchange_df['structure'] == struct.value].dropna()
print ('Updating ptable with structure selection')
layout.children[2] = self.update_ptable()
        elem_checkbox = CheckboxButtonGroup(labels=list(np.unique(self.struct_df['element'])), active=[1])
controls2.children[2] = elem_checkbox
self.plot_data = self.struct_df
print ('finished callback to update layout')
def update_prop(self):
#print ('Updating struct down selection for element')
#print (prop.value)
self.prop_df = self.elem_df[self.elem_df['property'] == prop.value].dropna()
#print ('The final dict', self.prop_df.to_dict(orient='list'))
self.plot_data = self.prop_df
def update_x(self):
pass
def update_y(self):
pass
def update_crossfilter(self):
print ('Triggering crossfilter')
#print (type(self.plot_data))
#print (np.unique(self.plot_data['property']))
layout.children[4] = self.create_figure(self.plot_data)
def clear_crossfilter(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.struct_df = None
self.elem_df = None
self.prop_df = None
self.code_df = None
self.exchange_df = None
self.plot_data = None
layout.children[4] = self.create_figure(self.plot_data)
def analysis_callback(self):
"""
        calls the Pade analysis on the current plot data
        NOTE: check that the data set is a single scatter before running the fit
        TODO / possible feature: plot the Pade for multiple selections
"""
print ('called Pade analysis')
# writes out the crossfiltered plot data on the server
crossfilt = self.plot_data[['k-point','value']]
crossfilt.columns=['Kpt','P']
crossfilt.to_csv('crossfilter_app/Rdata.csv')
print ('wrote out data file')
os.system('Rscript crossfilter_app/non_err_weighted_nls.R')
self.analysis_results = pd.read_csv('crossfilter_app/Result.csv')
#self.add_data = [ list(self.xs_init), list(self.predict_results['Preds....c.predict.m2..']) ]
ext_values = list(self.analysis_results['Extrapolate....extrapolates'])
error_values = list(self.analysis_results['Error....errors'])
self.ext_min_error = ext_values[error_values.index(min(error_values))]
print ('executed R script on crossfiltered data')
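        # pick the Pade order (1 or 2) whose fit reported the smallest error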
if error_values.index(min(error_values))==0:
self.predict_results = pd.read_csv('crossfilter_app/Pade1.csv')
self.add_data = [list(self.predict_results['Px....x_plot']), list(self.predict_results['Py....pade1.x_plot.'])]
elif error_values.index(min(error_values))==1:
self.predict_results = pd.read_csv('crossfilter_app/Pade2.csv')
self.add_data = [list(self.predict_results['Px....x_plot']), list(self.predict_results['Py....pade2.x_plot.'])]
print ('ADD DATA', self.add_data)
layout.children[4] = self.create_figure(self.add_data, datplot='Add', plot_type='plot_pade')
def update():
pass
#source_data = CF.plot_data
# initialize the crossfilter instance
CF = CrossFiltDFs()
# define the selection widgets for code, exchange,
# TODO: enable widgets that support multi-selection
# Elements selection widget from a periodic table
code = Select(title='Code', value=codes[0], options=codes)
code.on_change('value', lambda attr, old, new: CF.update_code())
exchange = Select(title='ExchangeCorrelation', value=exchanges[0], options=exchanges)
exchange.on_change('value', lambda attr, old, new: CF.update_exchange())
struct = Select(title='Structure', value=structures[0], options=structures)
struct.on_change('value', lambda attr, old, new: CF.update_struct())
element = CheckboxButtonGroup(labels=_elements, active=[1])
element.on_click(CF.update_element)
prop = Select(title='Property', value=properties[0], options=properties)
prop.on_change('value', lambda attr, old, new: CF.update_prop())
apply_crossfilter = Button(label='CrossFilter and Plot')
apply_crossfilter.on_click(CF.update_crossfilter)
clean_crossfilter = Button(label='Clear')
clean_crossfilter.on_click(CF.clear_crossfilter)
x_select.on_change('value', lambda attr, old, new: CF.update_x())
y_select.on_change('value', lambda attr, old, new: CF.update_y())
analyse_crossfilt = Button(label='PadeAnalysis')
analyse_crossfilt.on_click(CF.analysis_callback)
code_df = CF.crossfilter_by_tag(df_obs, {'code':code.value})
exchange_df = CF.crossfilter_by_tag(code_df, {'exchange':exchange.value})
struct_df = CF.crossfilter_by_tag(exchange_df, {'structure':struct.value})
elem_df = CF.crossfilter_by_tag(struct_df, {'element':_elements[0]})
prop_df = CF.crossfilter_by_tag(elem_df, {'property':prop.value})
CF_init = CrossFiltDFs(struct_df=struct_df, elem_df=elem_df, prop_df=prop_df,
                       code_df=code_df, exchange_df=exchange_df)
print ('executed till here')
#z = Select(title='Z-Axis', value='None', options=plottables)
#z.on_change('value', update)
controls1 = widgetbox([code, exchange, struct], width=400)
controls2 = widgetbox([element, prop, x_select, y_select, apply_crossfilter, analyse_crossfilt, clean_crossfilter], width=400)
#print ('Initial init figure data', type(CF_init.prop_df))
layout = column(description, controls1, ptable, controls2, CF_init.create_figure(CF_init.prop_df))
curdoc().add_root(layout)
curdoc().title = "DFT Benchmark"
update()
|
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing parameter support."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
ARCHETYPE = 'aquilon'
class TestParameterDefinition(TestBrokerCommand):
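    # unittest runs methods in alphanumeric order, so the numeric prefixes sequence the
    # lifecycle: add definitions (1xx), verify/update (14x), then delete and edge cases (2xx).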
def test_100_add(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testpath", "--value_type=string", "--description=blaah",
"--template=foo", "--required", "--default=default"]
self.noouttest(cmd)
def test_110_add_existing(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testpath", "--value_type=string", "--description=blaah",
"--template=foo", "--required", "--default=default"]
err = self.badrequesttest(cmd)
self.matchoutput(err,
"Parameter Definition testpath, parameter "
"definition holder aquilon already exists.",
cmd)
def test_130_add_default_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testdefault", "--description=blaah",
"--template=foo"]
self.noouttest(cmd)
def test_130_add_int_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testint", "--description=blaah",
"--template=foo", "--value_type=int", "--default=60"]
self.noouttest(cmd)
def test_130_add_invalid_int_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testbadint", "--description=blaah",
"--template=foo", "--value_type=int", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Expected an integer for default for path=testbadint", cmd)
def test_130_add_float_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testfloat", "--description=blaah",
"--template=foo", "--value_type=float", "--default=100.100"]
self.noouttest(cmd)
def test_130_add_invalid_float_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testbadfloat", "--description=blaah",
"--template=foo", "--value_type=float", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Expected an floating point number for default for path=testbadfloat", cmd)
def test_130_add_boolean_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testboolean", "--description=blaah",
"--template=foo", "--value_type=boolean", "--default=yes"]
self.noouttest(cmd)
def test_130_add_invalid_boolean_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testbadboolean", "--description=blaah",
"--template=foo", "--value_type=boolean", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Expected a boolean value for default for path=testbadboolean", cmd)
def test_130_add_list_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testlist", "--description=blaah",
"--template=foo", "--value_type=list", "--default=val1,val2"]
self.noouttest(cmd)
def test_130_add_json_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testjson", "--description=blaah",
"--template=foo", "--value_type=json", "--default=\"{'val1':'val2'}\""]
self.noouttest(cmd)
def test_130_add_invalid_json_value_type(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=testbadjson", "--description=blaah",
"--template=foo", "--value_type=json", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "The json string specified for default for path=testbadjson is invalid", cmd)
def test_130_add_noncompileable_arch(self):
cmd = ["add_parameter_definition", "--archetype", "windows",
"--path=testint", "--description=blaah",
"--template=foo", "--value_type=int", "--default=60"]
out = self.badrequesttest(cmd)
self.matchoutput(out, "Archetype windows is not compileable.", cmd)
def test_130_rebuild_required(self):
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=test_rebuild_required", "--description=rebuild_required",
"--template=foo", "--value_type=string", "--rebuild_required"]
self.noouttest(cmd)
def test_140_verify_add(self):
cmd = ["search_parameter_definition", "--archetype", ARCHETYPE]
out = self.commandtest(cmd)
self.searchoutput(out,
r'Parameter Definition: testpath \[required\]\s*'
r'Type: string\s*'
r'Template: foo\s*'
r'Default: default',
cmd)
self.searchoutput(out,
r'Parameter Definition: testdefault\s*'
r'Type: string\s*'
r'Template: foo',
cmd)
self.searchoutput(out,
r'Parameter Definition: testint\s*'
r'Type: int\s*'
r'Template: foo\s*'
r'Default: 60',
cmd)
self.searchoutput(out,
r'Parameter Definition: testjson\s*'
r'Type: json\s*'
r'Template: foo\s*'
r"Default: \"{'val1':'val2'}\"",
cmd)
self.searchoutput(out,
r'Parameter Definition: testlist\s*'
r'Type: list\s*'
r'Template: foo\s*'
r'Default: val1,val2',
cmd)
self.searchoutput(out,
r'Parameter Definition: testboolean\s*'
r'Type: boolean\s*'
r'Template: foo\s*'
r'Default: yes',
cmd)
self.searchoutput(out,
r'Parameter Definition: test_rebuild_required\s*'
r'Type: string\s*'
r'Template: foo\s*'
r'Description: rebuild_required\s*'
r'Rebuild Required: True',
cmd)
def test_145_verify_add(self):
cmd = ["search_parameter_definition", "--archetype", ARCHETYPE, "--format=proto"]
out = self.commandtest(cmd)
p = self.parse_paramdefinition_msg(out, 8)
param_defs = p.param_definitions[:]
param_defs.sort(key=lambda x: x.path)
self.failUnlessEqual(param_defs[0].path, 'test_rebuild_required')
self.failUnlessEqual(param_defs[0].value_type, 'string')
self.failUnlessEqual(param_defs[0].template, 'foo')
self.failUnlessEqual(param_defs[0].rebuild_required, True)
self.failUnlessEqual(param_defs[1].path, 'testboolean')
self.failUnlessEqual(param_defs[1].value_type, 'boolean')
self.failUnlessEqual(param_defs[1].template, 'foo')
self.failUnlessEqual(param_defs[1].default, 'yes')
self.failUnlessEqual(param_defs[2].path, 'testdefault')
self.failUnlessEqual(param_defs[2].value_type, 'string')
self.failUnlessEqual(param_defs[2].template, 'foo')
self.failUnlessEqual(param_defs[2].default, '')
self.failUnlessEqual(param_defs[3].path, 'testfloat')
self.failUnlessEqual(param_defs[3].value_type, 'float')
self.failUnlessEqual(param_defs[3].template, 'foo')
self.failUnlessEqual(param_defs[3].default, '100.100')
self.failUnlessEqual(param_defs[4].path, 'testint')
self.failUnlessEqual(param_defs[4].value_type, 'int')
self.failUnlessEqual(param_defs[4].template, 'foo')
self.failUnlessEqual(param_defs[4].default, '60')
self.failUnlessEqual(param_defs[5].path, 'testjson')
self.failUnlessEqual(param_defs[5].value_type, 'json')
self.failUnlessEqual(param_defs[5].template, 'foo')
self.failUnlessEqual(param_defs[5].default, u'"{\'val1\':\'val2\'}"')
self.failUnlessEqual(param_defs[6].path, 'testlist')
self.failUnlessEqual(param_defs[6].value_type, 'list')
self.failUnlessEqual(param_defs[6].template, 'foo')
self.failUnlessEqual(param_defs[6].default, "val1,val2")
self.failUnlessEqual(param_defs[7].path, 'testpath')
self.failUnlessEqual(param_defs[7].value_type, 'string')
self.failUnlessEqual(param_defs[7].template, 'foo')
self.failUnlessEqual(param_defs[7].default, 'default')
self.failUnlessEqual(param_defs[7].is_required, True)
def test_146_update(self):
cmd = ["update_parameter_definition", "--archetype", ARCHETYPE,
"--path=testint", "--description=testint",
"--default=100", "--required",
"--rebuild_required"]
self.noouttest(cmd)
def test_147_verify_add(self):
cmd = ["search_parameter_definition", "--archetype", ARCHETYPE]
out = self.commandtest(cmd)
self.searchoutput(out,
r'Parameter Definition: testint \[required\]\s*'
r'Type: int\s*'
r'Template: foo\s*'
r'Default: 100\s*'
r'Description: testint\s*'
r'Rebuild Required: True',
cmd)
def test_150_del_validation(self):
cmd = ["add_personality", "--archetype", ARCHETYPE,
"--personality=paramtest", "--eon_id=2", "--host_environment=legacy"]
self.noouttest(cmd)
cmd = ["add_parameter", "--personality=paramtest", "--path=testpath",
"--value=test"]
self.noouttest(cmd)
cmd = ["del_parameter_definition", "--archetype", ARCHETYPE, "--path=testpath" ]
out = self.badrequesttest(cmd)
self.matchoutput(out, "Parameter with path testpath used by following and cannot be deleted", cmd)
cmd = ["del_parameter", "--personality=paramtest", "--path=testpath"]
self.noouttest(cmd)
cmd = ["del_personality", "--archetype", ARCHETYPE, "--personality=paramtest"]
self.noouttest(cmd)
def test_200_del(self):
for path in ['testpath', 'testdefault', 'testint', 'testlist', 'testjson',
'testboolean', 'testfloat', 'test_rebuild_required']:
cmd = ["del_parameter_definition", "--archetype", ARCHETYPE,
"--path=%s" % path]
self.noouttest(cmd)
def test_200_verify_delete(self):
cmd = ["search_parameter_definition", "--archetype", ARCHETYPE ]
err = self.notfoundtest(cmd)
self.matchoutput(err, "Not Found: No parameter definitions found for archetype aquilon", cmd)
def test_210_invalid_path_cleaned(self):
for path in ["/startslash", "endslash/"] :
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=%s" % path, "--template=foo", "--value_type=string"]
self.noouttest(cmd)
cmd = ["search_parameter_definition", "--archetype", ARCHETYPE]
out = self.commandtest(cmd)
self.searchoutput(out, r'Parameter Definition: startslash\s*', cmd)
self.searchoutput(out, r'Parameter Definition: endslash\s*', cmd)
def test_215_invalid_path1(self):
for path in ["!badchar", "@badchar", "#badchar", "$badchar", "%badchar", "^badchar",
"&badchar", "*badchar" ":badchar", ";badcharjk", "+badchar"] :
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=%s" % path, "--template=foo", "--value_type=string"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Invalid path %s specified, path cannot start with special characters" % path,
cmd)
def test_220_valid_path(self):
for path in ["multi/part1/part2", "noslash", "valid/with_under", "valid/with.dot",
"valid/with-dash", "with_under", "with.dot", "with-dash"] :
cmd = ["add_parameter_definition", "--archetype", ARCHETYPE,
"--path=%s" % path, "--template=foo", "--value_type=string"]
self.noouttest(cmd)
cmd = ["del_parameter_definition", "--archetype", ARCHETYPE,
"--path=%s" % path]
self.noouttest(cmd)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestParameterDefinition)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import re
import time
from azure.storage.blob import BlobPermissions
from c7n_azure.constants import \
FUNCTION_CONSUMPTION_BLOB_CONTAINER, FUNCTION_PACKAGE_SAS_EXPIRY_DAYS
from c7n_azure.provisioning.app_insights import AppInsightsUnit
from c7n_azure.provisioning.app_service_plan import AppServicePlanUnit
from c7n_azure.provisioning.function_app import FunctionAppDeploymentUnit
from c7n_azure.provisioning.storage_account import StorageAccountUnit
from c7n_azure.session import Session
from c7n_azure.storage_utils import StorageUtilities
from c7n_azure.utils import ResourceIdParser, StringUtils
from msrestazure.azure_exceptions import CloudError
from c7n.utils import local_session
class FunctionAppUtilities(object):
log = logging.getLogger('custodian.azure.function_app_utils')
class FunctionAppInfrastructureParameters:
def __init__(self, app_insights, service_plan, storage_account,
function_app_resource_group_name, function_app_name):
self.app_insights = app_insights
self.service_plan = service_plan
self.storage_account = storage_account
self.function_app_resource_group_name = function_app_resource_group_name
self.function_app_name = function_app_name
@staticmethod
def get_storage_account_connection_string(id):
rg_name = ResourceIdParser.get_resource_group(id)
name = ResourceIdParser.get_resource_name(id)
client = local_session(Session).client('azure.mgmt.storage.StorageManagementClient')
obj = client.storage_accounts.list_keys(rg_name, name)
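        # resulting format: 'DefaultEndpointsProtocol=https;AccountName=<name>;AccountKey=<key>'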
connection_string = 'DefaultEndpointsProtocol={};AccountName={};AccountKey={}'.format(
'https',
name,
obj.keys[0].value)
return connection_string
@staticmethod
def is_consumption_plan(function_params):
return StringUtils.equal(function_params.service_plan['sku_tier'], 'dynamic')
@staticmethod
def deploy_function_app(parameters):
function_app_unit = FunctionAppDeploymentUnit()
function_app_params = \
{'name': parameters.function_app_name,
'resource_group_name': parameters.function_app_resource_group_name}
function_app = function_app_unit.get(function_app_params)
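        # Reuse an existing Function App with this name/resource group if one exists;
        # otherwise provision the service plan, app insights and storage account below.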
if function_app:
# retrieve the type of app service plan hosting the existing function app
session = local_session(Session)
web_client = session.client('azure.mgmt.web.WebSiteManagementClient')
app_id = function_app.server_farm_id
app_name = ResourceIdParser.get_resource_name(app_id)
app_resource_group_name = ResourceIdParser.get_resource_group(app_id)
app_service_plan = web_client.app_service_plans.get(app_resource_group_name, app_name)
# update the sku tier to properly reflect what is provisioned in Azure
parameters.service_plan['sku_tier'] = app_service_plan.sku.tier
return function_app
sp_unit = AppServicePlanUnit()
app_service_plan = sp_unit.provision_if_not_exists(parameters.service_plan)
# if only resource_id is provided, retrieve existing app plan sku tier
parameters.service_plan['sku_tier'] = app_service_plan.sku.tier
ai_unit = AppInsightsUnit()
app_insights = ai_unit.provision_if_not_exists(parameters.app_insights)
sa_unit = StorageAccountUnit()
storage_account_id = sa_unit.provision_if_not_exists(parameters.storage_account).id
con_string = FunctionAppUtilities.get_storage_account_connection_string(storage_account_id)
function_app_params.update(
{'location': app_service_plan.location,
'app_service_plan_id': app_service_plan.id,
'app_insights_key': app_insights.instrumentation_key,
'is_consumption_plan': FunctionAppUtilities.is_consumption_plan(parameters),
'storage_account_connection_string': con_string})
return function_app_unit.provision(function_app_params)
@staticmethod
def validate_function_name(function_name):
if (function_name is None or len(function_name) > 60 or len(function_name) < 1):
raise ValueError('Function name must be between 1-60 characters. Given name: "' +
str(function_name) + '"')
@staticmethod
def get_function_name(policy_name, suffix):
function_app_name = policy_name + '-' + suffix
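        # e.g. ('vm_stop_policy', 'a1b2c3') -> 'vm-stop-policy-a1b2c3' (illustrative names;
        # any character outside [A-Za-z0-9-] is replaced with '-')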
return re.sub('[^A-Za-z0-9\\-]', '-', function_app_name)
@classmethod
def publish_functions_package(cls, function_params, package):
session = local_session(Session)
web_client = session.client('azure.mgmt.web.WebSiteManagementClient')
cls.log.info('Publishing Function application')
# provision using Kudu Zip-Deploy
if not cls.is_consumption_plan(function_params):
publish_creds = web_client.web_apps.list_publishing_credentials(
function_params.function_app_resource_group_name,
function_params.function_app_name).result()
if package.wait_for_status(publish_creds):
package.publish(publish_creds)
else:
cls.log.error("Aborted deployment, ensure Application Service is healthy.")
# provision using WEBSITE_RUN_FROM_PACKAGE
else:
# fetch blob client
blob_client = StorageUtilities.get_blob_client_from_storage_account(
function_params.storage_account['resource_group_name'],
function_params.storage_account['name'],
session,
sas_generation=True
)
# create container for package
blob_client.create_container(FUNCTION_CONSUMPTION_BLOB_CONTAINER)
# upload package
blob_name = '%s.zip' % function_params.function_app_name
            package_to_publish = package.pkg.get_stream()
            count = os.path.getsize(package.pkg.path)
            blob_client.create_blob_from_stream(
                FUNCTION_CONSUMPTION_BLOB_CONTAINER, blob_name, package_to_publish, count)
            package_to_publish.close()
# create blob url for package
sas = blob_client.generate_blob_shared_access_signature(
FUNCTION_CONSUMPTION_BLOB_CONTAINER,
blob_name,
permission=BlobPermissions.READ,
expiry=datetime.datetime.utcnow() +
datetime.timedelta(days=FUNCTION_PACKAGE_SAS_EXPIRY_DAYS)
# expire in 10 years
)
blob_url = blob_client.make_blob_url(
FUNCTION_CONSUMPTION_BLOB_CONTAINER,
blob_name,
sas_token=sas)
# update application settings function package
app_settings = web_client.web_apps.list_application_settings(
function_params.function_app_resource_group_name,
function_params.function_app_name)
app_settings.properties['WEBSITE_RUN_FROM_PACKAGE'] = blob_url
web_client.web_apps.update_application_settings(
function_params.function_app_resource_group_name,
function_params.function_app_name,
kind=str,
properties=app_settings.properties
)
# Sync the scale controller for the Function App.
# Not required for the dedicated plans.
cls._sync_function_triggers(function_params)
cls.log.info('Finished publishing Function application')
@classmethod
def _sync_function_triggers(cls, function_params):
cls.log.info('Sync Triggers...')
# This delay replicates behavior of Azure Functions Core tool
# Link to the github: https://bit.ly/2K5oXbS
time.sleep(5)
session = local_session(Session)
web_client = session.client('azure.mgmt.web.WebSiteManagementClient')
max_retry_attempts = 3
for r in range(max_retry_attempts):
res = None
try:
res = web_client.web_apps.sync_function_triggers(
function_params.function_app_resource_group_name,
function_params.function_app_name
)
except CloudError as e:
# This appears to be a bug in the API
# Success can be either 200 or 204, which is
# unexpected and gets rethrown as a CloudError
if e.response.status_code in [200, 204]:
return True
cls.log.error("Failed to sync triggers...")
cls.log.error(e)
if res and res.status_code in [200, 204]:
return True
else:
cls.log.info("Retrying in 5 seconds...")
time.sleep(5)
cls.log.error("Unable to sync triggers...")
return False
|
|
# Copyright 2012 Josh Durgin
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import math
import os
import tempfile
import mock
from oslo_utils import imageutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _
import cinder.image.glance
from cinder.image import image_utils
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test_volume
from cinder.tests.unit import utils
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockImageExistsException(MockException):
"""Used as mock for rbd.ImageExists."""
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _FakeRetrying(wait_func=None,
original_retrying = driver.utils.retrying.Retrying,
*args, **kwargs):
return original_retrying(wait_func=lambda *a, **k: 0,
*args, **kwargs)
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('retrying.Retrying', _FakeRetrying)
@mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
@mock.patch('cinder.volume.drivers.rbd.RADOSClient')
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd, mock_client,
mock_proxy):
inst.mock_rbd = mock_rbd
inst.mock_rados = mock_rados
inst.mock_client = mock_client
inst.mock_proxy = mock_proxy
inst.mock_rbd.RBD.Error = Exception
inst.mock_rados.Error = Exception
inst.mock_rbd.ImageBusy = MockImageBusyException
inst.mock_rbd.ImageNotFound = MockImageNotFoundException
inst.mock_rbd.ImageExists = MockImageExistsException
inst.mock_rbd.InvalidArgument = MockImageNotFoundException
inst.driver.rbd = inst.mock_rbd
inst.driver.rados = inst.mock_rados
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
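# Canned 'ceph mon dump' output, presumably used to exercise monitor address parsing.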
CEPH_MON_DUMP = """dumped monmap epoch 1
{ "epoch": 1,
"fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
"modified": "2013-05-22 17:44:56.343618",
"created": "2013-05-22 17:44:56.343618",
"mons": [
{ "rank": 0,
"name": "a",
"addr": "[::1]:6789\/0"},
{ "rank": 1,
"name": "b",
"addr": "[::1]:6790\/0"},
{ "rank": 2,
"name": "c",
"addr": "[::1]:6791\/0"},
{ "rank": 3,
"name": "d",
"addr": "127.0.0.1:6792\/0"},
{ "rank": 4,
"name": "e",
"addr": "example.com:6791\/0"}],
"quorum": [
0,
1,
2]}
"""
@ddt.ddt
class RBDTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(RBDTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self.cfg.rbd_cluster_name = 'nondefault'
self.cfg.rbd_pool = 'rbd'
self.cfg.rbd_ceph_conf = None
self.cfg.rbd_secret_uuid = None
self.cfg.rbd_user = None
self.cfg.volume_dd_blocksize = '1M'
self.cfg.rbd_store_chunk_size = 4
self.cfg.rados_connection_retries = 3
self.cfg.rados_connection_interval = 5
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = driver.RBDDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.set_initialized()
self.context = context.get_admin_context()
self.volume_a = fake_volume.fake_volume_obj(
self.context,
**{'name': u'volume-0000000a',
'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a38',
'size': 10})
self.volume_b = fake_volume.fake_volume_obj(
self.context,
**{'name': u'volume-0000000b',
'id': '0c7d1f44-5a06-403f-bb82-ae7ad0d693a6',
'size': 10})
self.snapshot = fake_snapshot.fake_snapshot_obj(
self.context, name='snapshot-0000000a')
@ddt.data({'cluster_name': None, 'pool_name': 'rbd'},
{'cluster_name': 'volumes', 'pool_name': None})
@ddt.unpack
def test_min_config(self, cluster_name, pool_name):
self.cfg.rbd_cluster_name = cluster_name
self.cfg.rbd_pool = pool_name
with mock.patch('cinder.volume.drivers.rbd.rados'):
self.assertRaises(exception.InvalidConfigurationValue,
self.driver.check_for_setup_error)
@common_mocks
def test_create_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.driver.create_volume(self.volume_a)
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_a.name),
self.volume_a.size * units.Gi, order]
kwargs = {'old_format': False,
'features': client.features}
self.mock_rbd.RBD.return_value.create.assert_called_once_with(
*args, **kwargs)
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
@common_mocks
def test_manage_existing_get_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image(), 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 2 * units.Gi
existing_ref = {'source-name': self.volume_a.name}
return_size = self.driver.manage_existing_get_size(
self.volume_a,
existing_ref)
self.assertEqual(2, return_size)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing_get_non_integer_size(self):
rbd_image = self.driver.rbd.Image.return_value
rbd_image.size.return_value = int(1.75 * units.Gi)
existing_ref = {'source-name': self.volume_a.name}
return_size = self.driver.manage_existing_get_size(self.volume_a,
existing_ref)
self.assertEqual(2, return_size)
rbd_image.size.assert_called_once_with()
rbd_image.close.assert_called_once_with()
@common_mocks
def test_manage_existing_get_invalid_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image(), 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 'abcd'
existing_ref = {'source-name': self.volume_a.name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
self.volume_a, existing_ref)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \
mock_rbd_image_rename:
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
mock_rbd_image_rename.return_value = 0
self.driver.manage_existing(self.volume_a, existing_ref)
mock_rbd_image_rename.assert_called_with(
client.ioctx,
exist_volume,
self.volume_a.name)
@common_mocks
def test_manage_existing_with_exist_rbd_image(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.mock_rbd.RBD.return_value.rename.side_effect = (
MockImageExistsException)
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
self.assertRaises(self.mock_rbd.ImageExists,
self.driver.manage_existing,
self.volume_a, existing_ref)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageExists])
@common_mocks
def test_manage_existing_with_invalid_rbd_image(self):
self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound
invalid_volume = 'vol-invalid'
invalid_ref = {'source-name': invalid_volume}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
self.volume_a, invalid_ref)
# Make sure the exception was raised
self.assertEqual([self.mock_rbd.ImageNotFound],
RAISED_EXCEPTIONS)
@common_mocks
def test_delete_backup_snaps(self):
self.driver.rbd.Image.remove_snap = mock.Mock()
with mock.patch.object(self.driver, '_get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = [{'name': 'snap1'}]
rbd_image = self.driver.rbd.Image()
self.driver._delete_backup_snaps(rbd_image)
mock_get_backup_snaps.assert_called_once_with(rbd_image)
self.assertTrue(
self.driver.rbd.Image.return_value.remove_snap.called)
@common_mocks
def test_delete_volume(self):
client = self.mock_client.return_value
self.driver.rbd.Image.return_value.list_snaps.return_value = []
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
mock_get_clone_info.return_value = (None, None, None)
self.driver.delete_volume(self.volume_a)
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_a.name,
None)
(self.driver.rbd.Image.return_value
.list_snaps.assert_called_once_with())
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.driver.rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.driver.rbd.RBD.return_value.remove.call_count)
@common_mocks
def delete_volume_not_found(self):
self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound
self.assertIsNone(self.driver.delete_volume(self.volume_a))
self.mock_rbd.Image.assert_called_once_with()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_delete_busy_volume(self):
self.mock_rbd.Image.return_value.list_snaps.return_value = []
self.mock_rbd.RBD.return_value.remove.side_effect = (
self.mock_rbd.ImageBusy)
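# remove() is mocked to keep raising ImageBusy, so the driver is expected to
# retry the delete; the assertions below check for three remove() calls and
# three raised ImageBusy exceptions.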
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume, self.volume_a)
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_a.name,
None)
(self.mock_rbd.Image.return_value.list_snaps
.assert_called_once_with())
mock_rados_client.assert_called_once_with(self.driver)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.mock_rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
3, self.mock_rbd.RBD.return_value.remove.call_count)
self.assertEqual(3, len(RAISED_EXCEPTIONS))
# Make sure the exception was raised
self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS)
@common_mocks
def test_delete_volume_not_found(self):
self.mock_rbd.Image.return_value.list_snaps.return_value = []
self.mock_rbd.RBD.return_value.remove.side_effect = (
self.mock_rbd.ImageNotFound)
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertIsNone(self.driver.delete_volume(self.volume_a))
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_a.name,
None)
(self.mock_rbd.Image.return_value.list_snaps
.assert_called_once_with())
mock_rados_client.assert_called_once_with(self.driver)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.mock_rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.mock_rbd.RBD.return_value.remove.call_count)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageNotFound])
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
def test_create_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.create_snapshot(self.snapshot)
args = [str(self.snapshot.name)]
proxy.create_snap.assert_called_with(*args)
proxy.protect_snap.assert_called_with(*args)
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
def test_delete_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.delete_snapshot(self.snapshot)
proxy.remove_snap.assert_called_with(self.snapshot.name)
proxy.unprotect_snap.assert_called_with(self.snapshot.name)
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
def test_delete_notfound_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
proxy.unprotect_snap.side_effect = (
self.mock_rbd.ImageNotFound)
self.driver.delete_snapshot(self.snapshot)
proxy.remove_snap.assert_called_with(self.snapshot.name)
proxy.unprotect_snap.assert_called_with(self.snapshot.name)
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
def test_delete_notfound_on_remove_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
proxy.remove_snap.side_effect = (
self.mock_rbd.ImageNotFound)
self.driver.delete_snapshot(self.snapshot)
proxy.remove_snap.assert_called_with(self.snapshot.name)
proxy.unprotect_snap.assert_called_with(self.snapshot.name)
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
def test_delete_unprotected_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
proxy.unprotect_snap.side_effect = self.mock_rbd.InvalidArgument
self.driver.delete_snapshot(self.snapshot)
self.assertTrue(proxy.unprotect_snap.called)
self.assertTrue(proxy.remove_snap.called)
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
def test_delete_busy_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
proxy.unprotect_snap.side_effect = (
self.mock_rbd.ImageBusy)
with mock.patch.object(self.driver, '_get_children_info') as \
mock_get_children_info:
mock_get_children_info.return_value = [('pool', 'volume2')]
with mock.patch.object(driver, 'LOG') as \
mock_log:
self.assertRaises(exception.SnapshotIsBusy,
self.driver.delete_snapshot,
self.snapshot)
mock_get_children_info.assert_called_once_with(
proxy,
self.snapshot.name)
self.assertTrue(mock_log.info.called)
self.assertTrue(proxy.unprotect_snap.called)
self.assertFalse(proxy.remove_snap.called)
@common_mocks
def test_get_children_info(self):
volume = self.mock_proxy
volume.set_snap = mock.Mock()
volume.list_children = mock.Mock()
list_children = [('pool', 'volume2')]
volume.list_children.return_value = list_children
info = self.driver._get_children_info(volume,
self.snapshot['name'])
self.assertEqual(list_children, info)
@common_mocks
def test_get_clone_info(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume, self.volume_a.name)
self.assertEqual(parent_info, info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_get_clone_info_w_snap(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name))
volume.parent_info.return_value = parent_info
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_a.name,
snap=snapshot)
self.assertEqual(parent_info, info)
self.assertEqual(2, volume.set_snap.call_count)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_get_clone_info_w_exception(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
volume.parent_info.side_effect = self.mock_rbd.ImageNotFound
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_a.name,
snap=snapshot)
self.assertEqual((None, None, None), info)
self.assertEqual(2, volume.set_snap.call_count)
volume.parent_info.assert_called_once_with()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_get_clone_info_deleted_volume(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume,
"%s.deleted" % (self.volume_a.name))
self.assertEqual(parent_info, info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_create_cloned_volume_same_size(self):
self.cfg.rbd_max_clone_depth = 2
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
with mock.patch.object(self.driver, '_resize') as mock_resize:
mock_get_clone_depth.return_value = 1
self.driver.create_cloned_volume(self.volume_b, self.volume_a)
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
self.mock_rbd.Image.return_value.close \
.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
self.assertEqual(
0, mock_resize.call_count)
@common_mocks
def test_create_cloned_volume_different_size(self):
self.cfg.rbd_max_clone_depth = 2
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
with mock.patch.object(self.driver, '_resize') as mock_resize:
mock_get_clone_depth.return_value = 1
self.volume_b.size = 20
self.driver.create_cloned_volume(self.volume_b, self.volume_a)
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
self.mock_rbd.Image.return_value.close \
.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
self.assertEqual(
1, mock_resize.call_count)
@common_mocks
def test_create_cloned_volume_w_flatten(self):
self.cfg.rbd_max_clone_depth = 1
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (
('fake_pool', self.volume_b.name,
'.'.join((self.volume_b.name, 'clone_snap'))))
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.driver.create_cloned_volume(self.volume_b, self.volume_a)
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
(self.mock_rbd.Image.return_value.unprotect_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.remove_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
# We expect the driver to close both volumes, so 2 is expected
self.assertEqual(
2, self.mock_rbd.Image.return_value.close.call_count)
self.assertTrue(mock_get_clone_depth.called)
@common_mocks
def test_create_cloned_volume_w_clone_exception(self):
self.cfg.rbd_max_clone_depth = 2
self.mock_rbd.RBD.return_value.clone.side_effect = (
self.mock_rbd.RBD.Error)
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
self.volume_b, self.volume_a)
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
(self.mock_rbd.Image.return_value.unprotect_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.remove_snap
.assert_called_once_with('.'.join(
(self.volume_b.name, 'clone_snap'))))
self.mock_rbd.Image.return_value.close.assert_called_once_with()
@common_mocks
def test_good_locations(self):
locations = ['rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F', ]
for loc in locations:
    self.driver._parse_location(loc)
@common_mocks
def test_bad_locations(self):
locations = ['rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
loc)
self.assertFalse(
self.driver._is_cloneable(loc, {'disk_format': 'raw'}))
@common_mocks
def test_cloneable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
info = {'disk_format': 'raw'}
self.assertTrue(self.driver._is_cloneable(location, info))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_different_fsid(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://def/pool/image/snap'
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': 'raw'}))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_unreadable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
self.driver.rbd.Error = Exception
self.mock_proxy.side_effect = Exception
args = [location, {'disk_format': 'raw'}]
self.assertFalse(self.driver._is_cloneable(*args))
self.assertEqual(1, self.mock_proxy.call_count)
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_bad_format(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
formats = ['qcow2', 'vmdk', 'vdi']
for f in formats:
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': f}))
self.assertTrue(mock_get_fsid.called)
def _copy_image(self):
with mock.patch.object(tempfile, 'NamedTemporaryFile'):
with mock.patch.object(os.path, 'exists') as mock_exists:
mock_exists.return_value = True
with mock.patch.object(image_utils, 'fetch_to_raw'):
with mock.patch.object(self.driver, 'delete_volume'):
with mock.patch.object(self.driver, '_resize'):
mock_image_service = mock.MagicMock()
args = [None, self.volume_a,
mock_image_service, None]
self.driver.copy_image_to_volume(*args)
@common_mocks
def test_copy_image_no_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self._copy_image()
@common_mocks
def test_copy_image_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
self._copy_image()
@common_mocks
def test_update_volume_stats(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
client.cluster = mock.Mock()
client.cluster.mon_command = mock.Mock()
client.cluster.mon_command.return_value = (
0, '{"stats":{"total_bytes":64385286144,'
'"total_used_bytes":3289628672,"total_avail_bytes":61095657472},'
'"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,'
'"bytes_used":1546440971,"max_avail":28987613184,"objects":412}},'
'{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,'
'"max_avail":28987613184,"objects":0}}]}\n', '')
self.driver.configuration.safe_get = mock.Mock()
self.driver.configuration.safe_get.return_value = 'RBD'
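# The expected capacity figures below follow from the mocked `ceph df` output
# above, assuming the configured rbd_pool is the 'rbd' pool: free is its
# max_avail (28987613184 B ~= 27.0 GiB) and total is bytes_used + max_avail
# ((1546440971 + 28987613184) B ~= 28.44 GiB).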
expected = dict(
volume_backend_name='RBD',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='ceph',
total_capacity_gb=28.44,
free_capacity_gb=27.0,
reserved_percentage=0,
multiattach=False)
actual = self.driver.get_volume_stats(True)
client.cluster.mon_command.assert_called_once_with(
'{"prefix":"df", "format":"json"}', '')
self.assertDictMatch(expected, actual)
@common_mocks
def test_update_volume_stats_error(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
client.cluster = mock.Mock()
client.cluster.mon_command = mock.Mock()
client.cluster.mon_command.return_value = (22, '', '')
self.driver.configuration.safe_get = mock.Mock()
self.driver.configuration.safe_get.return_value = 'RBD'
expected = dict(volume_backend_name='RBD',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='ceph',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
multiattach=False)
actual = self.driver.get_volume_stats(True)
client.cluster.mon_command.assert_called_once_with(
'{"prefix":"df", "format":"json"}', '')
self.assertDictMatch(expected, actual)
@common_mocks
def test_get_mon_addrs(self):
with mock.patch.object(self.driver, '_execute') as mock_execute:
mock_execute.return_value = (CEPH_MON_DUMP, '')
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
self.assertEqual((hosts, ports), self.driver._get_mon_addrs())
@common_mocks
def test_initialize_connection(self):
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
with mock.patch.object(self.driver, '_get_mon_addrs') as \
mock_get_mon_addrs:
mock_get_mon_addrs.return_value = (hosts, ports)
expected = {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % (self.cfg.rbd_pool,
self.volume_a.name),
'hosts': hosts,
'ports': ports,
'cluster_name': self.cfg.rbd_cluster_name,
'auth_enabled': False,
'auth_username': None,
'secret_type': 'ceph',
'secret_uuid': None,
'volume_id': self.volume_a.id
}
}
actual = self.driver.initialize_connection(self.volume_a, None)
self.assertDictMatch(expected, actual)
self.assertTrue(mock_get_mon_addrs.called)
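# The (rbd_chunk_size, order) pairs below encode RBD's object size "order"
# (object size = 2**order bytes): a 1 MiB chunk is 2**20, 8 MiB is 2**23 and
# 32 MiB is 2**25, which is the order test_clone expects to be passed to
# clone().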
@ddt.data({'rbd_chunk_size': 1, 'order': 20},
{'rbd_chunk_size': 8, 'order': 23},
{'rbd_chunk_size': 32, 'order': 25})
@ddt.unpack
@common_mocks
def test_clone(self, rbd_chunk_size, order):
self.cfg.rbd_store_chunk_size = rbd_chunk_size
src_pool = u'images'
src_image = u'image-name'
src_snap = u'snapshot-name'
client_stack = []
def mock__enter__(inst):
def _inner():
client_stack.append(inst)
return inst
return _inner
client = self.mock_client.return_value
# capture both rados clients used to perform the clone
client.__enter__.side_effect = mock__enter__(client)
self.driver._clone(self.volume_a, src_pool, src_image, src_snap)
args = [client_stack[0].ioctx, str(src_image), str(src_snap),
client_stack[1].ioctx, str(self.volume_a.name)]
kwargs = {'features': client.features,
'order': order}
self.mock_rbd.RBD.return_value.clone.assert_called_once_with(
*args, **kwargs)
self.assertEqual(2, client.__enter__.call_count)
@common_mocks
def test_extend_volume(self):
fake_size = '20'
size = int(fake_size) * units.Gi
with mock.patch.object(self.driver, '_resize') as mock_resize:
self.driver.extend_volume(self.volume_a, fake_size)
mock_resize.assert_called_once_with(self.volume_a, size=size)
@common_mocks
def test_retype(self):
context = {}
diff = {'encryption': {},
'extra_specs': {}}
updates = {'name': 'testvolume',
'host': 'currenthost',
'id': fake.VOLUME_ID}
fake_type = 'high-IOPS'
volume = fake_volume.fake_volume_obj(context, **updates)
# The hosts have already been checked to be the same before
# rbd.retype is called.
# RBD doesn't support multiple pools in a driver.
host = {'host': 'currenthost'}
self.assertTrue(self.driver.retype(context, volume,
fake_type, diff, host))
# The encryption settings have already been checked to be the same
# before rbd.retype is called.
diff['encryption'] = {}
self.assertTrue(self.driver.retype(context, volume,
fake_type, diff, host))
# extra_specs changes are supported.
diff['extra_specs'] = {'non-empty': 'non-empty'}
self.assertTrue(self.driver.retype(context, volume,
fake_type, diff, host))
diff['extra_specs'] = {}
self.assertTrue(self.driver.retype(context, volume,
fake_type, diff, host))
@common_mocks
def test_update_migrated_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename:
context = {}
mock_rename.return_value = 0
model_update = self.driver.update_migrated_volume(context,
self.volume_a,
self.volume_b,
'available')
mock_rename.assert_called_with(client.ioctx,
'volume-%s' % self.volume_b.id,
'volume-%s' % self.volume_a.id)
self.assertEqual({'_name_id': None,
'provider_location': None}, model_update)
def test_rbd_volume_proxy_init(self):
mock_driver = mock.Mock(name='driver')
mock_driver._connect_to_rados.return_value = (None, None)
with driver.RBDVolumeProxy(mock_driver, self.volume_a.name):
self.assertEqual(1, mock_driver._connect_to_rados.call_count)
self.assertFalse(mock_driver._disconnect_from_rados.called)
self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
mock_driver.reset_mock()
snap = u'snapshot-name'
with driver.RBDVolumeProxy(mock_driver, self.volume_a.name,
snapshot=snap):
self.assertEqual(1, mock_driver._connect_to_rados.call_count)
self.assertFalse(mock_driver._disconnect_from_rados.called)
self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
@common_mocks
def test_connect_to_rados(self):
# Default
self.cfg.rados_connect_timeout = -1
self.mock_rados.Rados.return_value.open_ioctx.return_value = \
self.mock_rados.Rados.return_value.ioctx
# default configured pool
ret = self.driver._connect_to_rados()
self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
# Expect no timeout if default is used
self.mock_rados.Rados.return_value.connect.assert_called_once_with()
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1])
self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
self.cfg.rbd_pool)
conf_set = self.mock_rados.Rados.return_value.conf_set
conf_set.assert_not_called()
# different pool
ret = self.driver._connect_to_rados('alt_pool')
self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1])
self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
'alt_pool')
# With timeout
self.cfg.rados_connect_timeout = 1
self.mock_rados.Rados.return_value.connect.reset_mock()
self.driver._connect_to_rados()
conf_set.assert_has_calls((mock.call('rados_osd_op_timeout', '1'),
mock.call('rados_mon_op_timeout', '1'),
mock.call('client_mount_timeout', '1')))
self.mock_rados.Rados.return_value.connect.assert_called_once_with()
# error
self.mock_rados.Rados.return_value.open_ioctx.reset_mock()
self.mock_rados.Rados.return_value.shutdown.reset_mock()
self.mock_rados.Rados.return_value.open_ioctx.side_effect = (
self.mock_rados.Error)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._connect_to_rados)
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(
3, self.mock_rados.Rados.return_value.shutdown.call_count)
class RBDImageIOWrapperTestCase(test.TestCase):
def setUp(self):
super(RBDImageIOWrapperTestCase, self).setUp()
self.meta = mock.Mock()
self.meta.user = 'mock_user'
self.meta.conf = 'mock_conf'
self.meta.pool = 'mock_pool'
self.meta.image = mock.Mock()
self.meta.image.read = mock.Mock()
self.meta.image.write = mock.Mock()
self.meta.image.size = mock.Mock()
self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta)
self.data_length = 1024
self.full_data = b'abcd' * 256
def test_init(self):
self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta)
self.assertEqual(0, self.mock_rbd_wrapper._offset)
def test_inc_offset(self):
self.mock_rbd_wrapper._inc_offset(10)
self.mock_rbd_wrapper._inc_offset(10)
self.assertEqual(20, self.mock_rbd_wrapper._offset)
def test_rbd_image(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image)
def test_rbd_user(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user)
def test_rbd_pool(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool)
def test_rbd_conf(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf)
def test_read(self):
def mock_read(offset, length):
return self.full_data[offset:length]
self.meta.image.read.side_effect = mock_read
self.meta.image.size.return_value = self.data_length
data = self.mock_rbd_wrapper.read()
self.assertEqual(self.full_data, data)
data = self.mock_rbd_wrapper.read()
self.assertEqual(b'', data)
self.mock_rbd_wrapper.seek(0)
data = self.mock_rbd_wrapper.read()
self.assertEqual(self.full_data, data)
self.mock_rbd_wrapper.seek(0)
data = self.mock_rbd_wrapper.read(10)
self.assertEqual(self.full_data[:10], data)
def test_write(self):
self.mock_rbd_wrapper.write(self.full_data)
self.assertEqual(1024, self.mock_rbd_wrapper._offset)
def test_seekable(self):
self.assertTrue(self.mock_rbd_wrapper.seekable)
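# test_seek below exercises standard whence semantics: 0 seeks to an absolute
# offset, 1 is relative to the current offset, and 2 is relative to the image
# size reported by meta.image.size.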
def test_seek(self):
self.assertEqual(0, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10)
self.assertEqual(10, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10)
self.assertEqual(10, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10, 1)
self.assertEqual(20, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(0)
self.mock_rbd_wrapper.write(self.full_data)
self.meta.image.size.return_value = self.data_length
self.mock_rbd_wrapper.seek(0)
self.assertEqual(0, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10, 2)
self.assertEqual(self.data_length + 10, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(-10, 2)
self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset)
# test exceptions.
self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3)
self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1)
# offset should not have been changed by any of the previous
# operations.
self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset)
def test_tell(self):
self.assertEqual(0, self.mock_rbd_wrapper.tell())
self.mock_rbd_wrapper._inc_offset(10)
self.assertEqual(10, self.mock_rbd_wrapper.tell())
def test_flush(self):
with mock.patch.object(driver, 'LOG') as mock_logger:
self.meta.image.flush = mock.Mock()
self.mock_rbd_wrapper.flush()
self.meta.image.flush.assert_called_once_with()
self.meta.image.flush.reset_mock()
# this should be caught and logged silently.
self.meta.image.flush.side_effect = AttributeError
self.mock_rbd_wrapper.flush()
self.meta.image.flush.assert_called_once_with()
msg = _("flush() not supported in this version of librbd")
mock_logger.warning.assert_called_with(msg)
def test_fileno(self):
self.assertRaises(IOError, self.mock_rbd_wrapper.fileno)
def test_close(self):
self.mock_rbd_wrapper.close()
class ManagedRBDTestCase(test_volume.DriverTestCase):
driver_name = "cinder.volume.drivers.rbd.RBDDriver"
def setUp(self):
super(ManagedRBDTestCase, self).setUp()
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
self.called = []
def _create_volume_from_image(self, expected_status, raw=False,
clone_error=False):
"""Try to clone a volume from an image, and check status afterwards.
NOTE: if clone_error is True we force the image type to raw, otherwise
clone_image is not called.
"""
# See tests.image.fake for image types.
if raw:
image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
else:
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# creating volume testdata
db_volume = {'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'availability_zone': 'fake_zone',
'attach_status': 'detached',
'host': 'dummy'}
volume = objects.Volume(context=self.context, **db_volume)
volume.create()
try:
if not clone_error:
self.volume.create_volume(self.context, volume,
request_spec={'image_id': image_id})
else:
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume,
request_spec={'image_id': image_id})
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual(expected_status, volume.status)
finally:
# cleanup
volume.destroy()
@mock.patch.object(cinder.image.glance, 'get_default_image_service')
def test_create_vol_from_image_status_available(self, mock_gdis):
"""Clone raw image then verify volume is in available state."""
def _mock_clone_image(context, volume, image_location,
image_meta, image_service):
return {'provider_location': None}, True
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = _mock_clone_image
with mock.patch.object(self.volume.driver, 'create_volume') as \
mock_create:
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('available', raw=True)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertFalse(mock_create.called)
self.assertTrue(mock_gdis.called)
@mock.patch.object(cinder.image.glance, 'get_default_image_service')
@mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_vol_from_non_raw_image_status_available(
self, mock_qemu_info, mock_fetch, mock_gdis):
"""Clone non-raw image then verify volume is in available state."""
def _mock_clone_image(context, volume, image_location,
image_meta, image_service):
return {'provider_location': None}, False
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
mock_fetch.return_value = mock.MagicMock(spec=utils.get_file_spec())
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = _mock_clone_image
with mock.patch.object(self.volume.driver, 'create_volume') as \
mock_create:
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('available', raw=False)
self.assertTrue(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertTrue(mock_create.called)
self.assertTrue(mock_gdis.called)
@mock.patch.object(cinder.image.glance, 'get_default_image_service')
def test_create_vol_from_image_status_error(self, mock_gdis):
"""Fail to clone raw image then verify volume is in error state."""
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = exception.CinderException
with mock.patch.object(self.volume.driver, 'create_volume'):
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('error', raw=True,
clone_error=True)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertFalse(self.volume.driver.create_volume.called)
self.assertTrue(mock_gdis.called)
def test_clone_failure(self):
driver = self.volume.driver
with mock.patch.object(driver, '_is_cloneable', lambda *args: False):
image_loc = (mock.Mock(), None)
actual = driver.clone_image(mock.Mock(),
mock.Mock(),
image_loc,
{},
mock.Mock())
self.assertEqual(({}, False), actual)
self.assertEqual(({}, False),
driver.clone_image('', object(), None, {}, ''))
def test_clone_success(self):
expected = ({'provider_location': None}, True)
driver = self.volume.driver
with mock.patch.object(self.volume.driver, '_is_cloneable') as \
mock_is_cloneable:
mock_is_cloneable.return_value = True
with mock.patch.object(self.volume.driver, '_clone') as \
mock_clone:
with mock.patch.object(self.volume.driver, '_resize') as \
mock_resize:
image_loc = ('rbd://fee/fi/fo/fum', None)
volume = {'name': 'vol1'}
actual = driver.clone_image(mock.Mock(),
volume,
image_loc,
{'disk_format': 'raw',
'id': 'id.foo'},
mock.Mock())
self.assertEqual(expected, actual)
mock_clone.assert_called_once_with(volume,
'fi', 'fo', 'fum')
mock_resize.assert_called_once_with(volume)
def test_clone_multilocation_success(self):
expected = ({'provider_location': None}, True)
driver = self.volume.driver
def cloneable_side_effect(url_location, image_meta):
return url_location == 'rbd://fee/fi/fo/fum'
with mock.patch.object(self.volume.driver, '_is_cloneable') \
as mock_is_cloneable, \
mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
mock.patch.object(self.volume.driver, '_resize') \
as mock_resize:
mock_is_cloneable.side_effect = cloneable_side_effect
image_loc = ('rbd://bee/bi/bo/bum',
[{'url': 'rbd://bee/bi/bo/bum'},
{'url': 'rbd://fee/fi/fo/fum'}])
volume = {'name': 'vol1'}
image_meta = mock.sentinel.image_meta
image_service = mock.sentinel.image_service
actual = driver.clone_image(self.context,
volume,
image_loc,
image_meta,
image_service)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_is_cloneable.call_count)
mock_clone.assert_called_once_with(volume,
'fi', 'fo', 'fum')
mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
image_meta)
mock_resize.assert_called_once_with(volume)
def test_clone_multilocation_failure(self):
expected = ({}, False)
driver = self.volume.driver
with mock.patch.object(driver, '_is_cloneable', return_value=False) \
as mock_is_cloneable, \
mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
mock.patch.object(self.volume.driver, '_resize') \
as mock_resize:
image_loc = ('rbd://bee/bi/bo/bum',
[{'url': 'rbd://bee/bi/bo/bum'},
{'url': 'rbd://fee/fi/fo/fum'}])
volume = {'name': 'vol1'}
image_meta = mock.sentinel.image_meta
image_service = mock.sentinel.image_service
actual = driver.clone_image(self.context,
volume,
image_loc,
image_meta,
image_service)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_is_cloneable.call_count)
mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
image_meta)
mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
image_meta)
self.assertFalse(mock_clone.called)
self.assertFalse(mock_resize.called)
|
|
#!/usr/bin/python
import csv
#import classes
from node import Node
from condition import Condition
from nodelist import NodeList
from flowPrinter import FlowPrinter
#1. Parse from CSV
#2. Create Node from Parse object - store in list
#3. Iterate the list and inject the path accordingly
#4. Generate Flow
#**GLOBAL
ISDEBUG = 0
PRINT_FLOW = 'y'
PRINT_OUTPUT = 'n'
DIVMARK = '|'
#Index of the csv input
TYPE = 0
ID = 1
DES = 2
NEXTID = 3
TEXT = 4
DATAOBJ = 5
REQ = 6
NOCONDI = 7
#Multiple Condi des and nextid
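#Illustrative row layout implied by the index constants above and the ';'
#delimiter used when reading the CSV (the example values are made up):
#  Type;ID;Des;NextID;Text;DataObj;Req;NoCondi;CondiDes1;CondiNextID1;...
#e.g. C;C1;check state;;some text;objA;REQ-1;2;state ok;T1;state bad;T2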
gCSVFileName = "perso_AID_process.csv"
gOutputFile = "test.txt"
gHeadingFile = "header_format.txt"
gNodeList = None
gPrintNode = ""
###END OF GLOBAL
sType = ""
sID = ""
sDes = ""
sNextID = ""
sText =""
sDataObj = ""
sReq= ""
iNoCondi = 0
sCondiDes = ""
sCondiNextID = ""
listofCondi = []
listOfNode = []
#--------------------------------
#Input
#--------------------------------
fileName = input("CSV file name to read: ")
fileName = fileName.rstrip()
if fileName != "":
    gCSVFileName = fileName + ".csv"
fileName = input("Text file name to output: ")
fileName = fileName.rstrip()
if fileName != "":
    gOutputFile = fileName + ".txt"
printNode = input("Specific T Node name to output: ")
printNode = printNode.rstrip()
if printNode != "":
    gPrintNode = printNode
print ("Reading :", gCSVFileName)
print ("Output node :", gPrintNode)
print ("Output to :", gOutputFile)
###
# Parse CSV and create Node List
##
#Open CSV
with open(gCSVFileName, 'r', newline='') as csvfile:
nodeReader = csv.reader(csvfile, delimiter=';', quotechar='|')
for row in nodeReader:
if (row[TYPE] == "Type"):
continue
for index in range(len(row)):
#if index == 1 : print(row[1])
if index == TYPE :
sType = row[index]
if index == ID :
sID = row[index]
if index == DES :
sDes = row[index]
if index == NEXTID :
sNextID = row[index]
if index == TEXT :
sText = row[index]
if index == DATAOBJ :
sDataObj = row[index]
if index == REQ:
sReq = row[index]
if index == NOCONDI :
if(row[index] == ""):
iNoCondi = 0
break
else:
iNoCondi = int(row[index])
tempIndex = index
listofCondi = []
for noCondiItem in range(iNoCondi):
condi = Condition (row[tempIndex +1], row[tempIndex +2])
tempIndex += 2
#Push condi to the list
listofCondi.append(condi)
#END of Condi loop
break #end of condi reading
#END OF row looping
#Create Node
listOfPath = []
node = Node(sType, sID, sDes, sNextID, sText, sDataObj, sReq, iNoCondi, listofCondi, listOfPath )
#Push Node to list
listOfNode.append(node)
#END of CSV loop
#END of CSV file
#put list into node list class
gNodeList = NodeList(listOfNode)
###
# END OF Parse CSV and create Node List
##
###
# Iterate NodeList and Inject dependency path accordingly
##
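#Path strings are built up below as ID sequences separated by DIVMARK ('|'):
#an S node seeds its successor with 'S|', P nodes append their own ID plus
#'|', and C nodes append '<ID>.<condition index>|', so a hypothetical path
#could look like 'S|P1|C2.0|'.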
#Get list of T node
TNodeList = gNodeList.getNodeListWithType("T")
#Check 1st node in list is S node
if gNodeList.getNode(0).getType() != "S":
print ('Error!! 1st node is not S')
#Iterate Node List
for node in gNodeList.getList():
#S node processing
if node.getType() == 'S':
gNodeList.getNodeWithID(node.getNextID()).addPath('S'+ DIVMARK)
#P node processing
if node.getType() == 'P':
if ISDEBUG ==1 : print ("Processing :", node.getID())
#Get own path and append own ID
listOfPath2Inject = node.getListofPath()
for i in range (len(listOfPath2Inject)):
listOfPath2Inject[i] = listOfPath2Inject[i] + node.getID()
#END OF Get Own path
node2Inject = gNodeList.getNodeWithID(node.getNextID())
for path in node.getListofPath():
if ISDEBUG ==1 : print ("From %s Injecting path %s to node %s" %(node.getID(), path, node2Inject.getID()))
newPath = path + DIVMARK
node2Inject.addPath(newPath)
# END OF P node processing
#C node processing
if node.getType() == 'C':
if ISDEBUG ==1 : print ("Processing :", node.getID())
#Get own path and append own ID
listOfPath2Inject = node.getListofPath()
for i in range (len(listOfPath2Inject)):
listOfPath2Inject[i] = listOfPath2Inject[i] + node.getID()
#END OF Get Own path
#For each condition
for i in range (len(node.getCondiList())):
tempCondiList = node.getCondiList()
condi = tempCondiList[i]
if ISDEBUG ==1 : print ("Processing condi :", condi.getNextID())
node2Inject = gNodeList.getNodeWithID(condi.getNextID())
nodeInjectFrom = node
for path in node.getListofPath():
if ISDEBUG ==1 : print ("From %s Injecting path %s to node %s" %(node.getID(), path, node2Inject.getID()))
#Add condi no into path
newPath = path + '.' + str(i) + DIVMARK
node2Inject.addPath(newPath)
if ISDEBUG ==1 :print("--------------------------------------")
#END OF For each Condition
#END OF C node processing
#END OF Iterate Node List
if PRINT_FLOW == 'y' :
for node in gNodeList.getList():
print (node.getID())
print("------------------------------")
for path in node.getListofPath():
print (path)
print("===============================")
#-------------------------------
#Print Comment to file
#-------------------------------
printer = FlowPrinter(gHeadingFile)
stringToPrint = ""
if gPrintNode != "":
#Print specific Node
node2print = gNodeList.getNodeWithID(gPrintNode)
stringToPrint = stringToPrint + printer.prepareTestCommentforNode(gNodeList, node2print.getID())
if PRINT_OUTPUT == 'y':
print(stringToPrint)
else:
for node2print in gNodeList.getNodeListWithType('T'):
stringToPrint = stringToPrint + printer.prepareTestCommentforNode(gNodeList, node2print.getID())
if PRINT_OUTPUT == 'y':
print(stringToPrint)
print(" >>>>>>> Flow for %s is printed to %s" % (node2print.getID(), gOutputFile))
with open(gOutputFile, "w") as outputFile:
    outputFile.write(stringToPrint)
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime as dt
import six
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils as bench_utils
class CeilometerScenario(scenario.OpenStackScenario):
"""Base class for Ceilometer scenarios with basic atomic actions."""
def _make_samples(self, count=1, interval=0, counter_name="cpu_util",
counter_type="gauge", counter_unit="%", counter_volume=1,
project_id=None, user_id=None, source=None,
timestamp=None, metadata_list=None, batch_size=None):
"""Prepare and return a list of samples.
:param count: specifies number of samples in array
:param interval: specifies interval between timestamps of near-by
samples
:param counter_name: specifies name of the counter
:param counter_type: specifies type of the counter
:param counter_unit: specifies unit of the counter
:param counter_volume: specifies volume of the counter
:param project_id: specifies project id for samples
:param user_id: specifies user id for samples
:param source: specifies source for samples
:param timestamp: specifies timestamp for samples
:param metadata_list: specifies list of resource metadata
:param batch_size: specifies number of samples to store in one query
:returns: generator that produces lists of samples
"""
batch_size = batch_size or count
sample = {
"counter_name": counter_name,
"counter_type": counter_type,
"counter_unit": counter_unit,
"counter_volume": counter_volume,
"resource_id": self.generate_random_name()
}
opt_fields = {
"project_id": project_id,
"user_id": user_id,
"source": source,
"timestamp": timestamp,
}
for k, v in six.iteritems(opt_fields):
if v:
sample.update({k: v})
len_meta = len(metadata_list) if metadata_list else 0
now = timestamp or dt.datetime.utcnow()
samples = []
for i in six.moves.xrange(count):
if i and not (i % batch_size):
yield samples
samples = []
sample_item = dict(sample)
sample_item["timestamp"] = (
now - dt.timedelta(seconds=(interval * i))
).isoformat()
if metadata_list:
# NOTE(idegtiarov): Adding more than one metadata template
# requires its proportional distribution among all the samples.
sample_item["resource_metadata"] = metadata_list[
i * len_meta // count
]
samples.append(sample_item)
yield samples
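# A minimal usage sketch (hypothetical parameter values): with count=4 and
# batch_size=2 the generator yields two lists of two sample dicts each, with
# timestamps stepping back `interval` seconds from `now`, e.g.
#   for batch in self._make_samples(count=4, batch_size=2, interval=10):
#       self._create_samples(batch)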
def _make_query_item(self, field, op="eq", value=None):
"""Create a SimpleQuery item for requests.
:param field: filtered field
:param op: operator for filtering
:param value: matched value
:returns: dict with field, op and value keys for query
"""
return {"field": field, "op": op, "value": value}
def _make_general_query(self, filter_by_project_id=None,
filter_by_user_id=None,
filter_by_resource_id=None,
metadata_query=None):
"""Create a SimpleQuery for the list benchmarks.
:param filter_by_project_id: add a project id to query
:param filter_by_user_id: add a user id to query
:param filter_by_resource_id: add a resource id to query
:param metadata_query: metadata dict that will add to query
:returns: SimpleQuery with specified items
"""
query = []
metadata_query = metadata_query or {}
if filter_by_user_id:
user_id = self.context["user"]["id"]
query.append(self._make_query_item("user_id", "eq", user_id))
if filter_by_project_id or filter_by_resource_id:
project_id = self.context["tenant"]["id"]
if filter_by_project_id:
query.append(self._make_query_item("project_id", "eq",
project_id))
if filter_by_resource_id:
resource_id = self.context["tenant"]["resources"][0]
query.append(self._make_query_item("resource_id", "eq",
resource_id))
for key, value in metadata_query.items():
query.append(self._make_query_item("metadata.%s" % key,
value=value))
return query
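# For example (illustrative values), filter_by_user_id=True combined with
# metadata_query={"status": "active"} would yield:
#   [{"field": "user_id", "op": "eq", "value": "<user id from context>"},
#    {"field": "metadata.status", "op": "eq", "value": "active"}]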
def _make_timestamp_query(self, start_time=None, end_time=None):
"""Create ceilometer query for timestamp range.
:param start_time: start datetime in isoformat
:param end_time: end datetime in isoformat
:returns: query with timestamp range
"""
query = []
if end_time and start_time and end_time < start_time:
msg = "End time should be greater than or equal to start time"
raise exceptions.InvalidArgumentsException(msg)
if start_time:
query.append(self._make_query_item("timestamp", ">=", start_time))
if end_time:
query.append(self._make_query_item("timestamp", "<=", end_time))
return query
def _make_profiler_key(self, method, query=None, limit=None):
"""Create key for profiling method with query.
:param method: Original profiler tag for method
:param query: ceilometer query which fields will be added to key
:param limit: if it exists `limit` will be added to key
:returns: profiler key that includes method and queried fields
"""
query = query or []
limit_line = limit and "limit" or ""
fields_line = "&".join("%s" % a["field"] for a in query)
key_identifiers = "&".join(x for x in (limit_line, fields_line) if x)
key = ":".join(x for x in (method, key_identifiers) if x)
return key
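# Example: _make_profiler_key("ceilometer.list_samples",
#     query=[{"field": "resource_id", "op": "eq", "value": "foo"}], limit=10)
# returns "ceilometer.list_samples:limit&resource_id".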
def _get_alarm_dict(self, **kwargs):
"""Prepare and return an alarm dict for creating an alarm.
:param kwargs: optional parameters to create alarm
:returns: alarm dictionary used to create an alarm
"""
alarm_id = self.generate_random_name()
alarm = {"alarm_id": alarm_id,
"name": alarm_id,
"description": "Test Alarm"}
alarm.update(kwargs)
return alarm
@atomic.action_timer("ceilometer.list_alarms")
def _list_alarms(self, alarm_id=None):
"""List alarms.
List alarms matching alarm_id. It fetches all alarms
if alarm_id is None.
:param alarm_id: specifies id of the alarm
:returns: list of alarms
"""
if alarm_id:
return self.clients("ceilometer").alarms.get(alarm_id)
else:
return self.clients("ceilometer").alarms.list()
@atomic.action_timer("ceilometer.create_alarm")
def _create_alarm(self, meter_name, threshold, kwargs):
"""Create an alarm.
:param meter_name: specifies meter name of the alarm
:param threshold: specifies alarm threshold
:param kwargs: contains optional features of alarm to be created
:returns: alarm
"""
alarm_dict = self._get_alarm_dict(**kwargs)
alarm_dict.update({"meter_name": meter_name,
"threshold": threshold})
alarm = self.clients("ceilometer").alarms.create(**alarm_dict)
return alarm
@atomic.action_timer("ceilometer.delete_alarm")
def _delete_alarm(self, alarm_id):
"""Delete an alarm.
:param alarm_id: specifies id of the alarm
"""
self.clients("ceilometer").alarms.delete(alarm_id)
@atomic.action_timer("ceilometer.update_alarm")
def _update_alarm(self, alarm_id, alarm_dict_delta):
"""Update an alarm.
:param alarm_id: specifies id of the alarm
:param alarm_dict_delta: features of alarm to be updated
"""
self.clients("ceilometer").alarms.update(alarm_id, **alarm_dict_delta)
@atomic.action_timer("ceilometer.get_alarm_history")
def _get_alarm_history(self, alarm_id):
"""Assemble the alarm history requested.
:param alarm_id: specifies id of the alarm
:returns: list of alarm changes
"""
return self.clients("ceilometer").alarms.get_history(alarm_id)
@atomic.action_timer("ceilometer.get_alarm_state")
def _get_alarm_state(self, alarm_id):
"""Get the state of the alarm.
:param alarm_id: specifies id of the alarm
:returns: state of the alarm
"""
return self.clients("ceilometer").alarms.get_state(alarm_id)
@atomic.action_timer("ceilometer.set_alarm_state")
def _set_alarm_state(self, alarm, state, timeout):
"""Set the state of the alarm.
:param alarm: alarm instance
:param state: an alarm state to be set
:param timeout: The number of seconds for which to attempt a
successful check of the alarm state.
:returns: alarm in the set state
"""
self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
return bench_utils.wait_for(alarm,
ready_statuses=[state],
update_resource=bench_utils
.get_from_manager(),
timeout=timeout, check_interval=1)
@atomic.action_timer("ceilometer.list_events")
def _list_events(self):
"""Get list of user's events.
It fetches all events.
:returns: list of events
"""
return self.admin_clients("ceilometer").events.list()
@atomic.action_timer("ceilometer.get_event")
def _get_event(self, event_id):
"""Get event with specific id.
Get event matching event_id.
:param event_id: specifies id of the event
:returns: event
"""
return self.admin_clients("ceilometer").events.get(event_id)
@atomic.action_timer("ceilometer.list_event_types")
def _list_event_types(self):
"""Get list of all event types.
:returns: list of event types
"""
return self.admin_clients("ceilometer").event_types.list()
@atomic.action_timer("ceilometer.list_event_traits")
def _list_event_traits(self, event_type, trait_name):
"""Get list of event traits.
:param event_type: specifies the type of event
:param trait_name: specifies trait name
:returns: list of event traits
"""
return self.admin_clients("ceilometer").traits.list(event_type,
trait_name)
@atomic.action_timer("ceilometer.list_event_trait_descriptions")
def _list_event_trait_descriptions(self, event_type):
"""Get list of event trait descriptions.
:param event_type: specifies the type of event
:returns: list of event trait descriptions
"""
return self.admin_clients("ceilometer").trait_descriptions.list(
event_type)
def _list_samples(self, query=None, limit=None):
"""List all Samples.
:param query: optional param that specify query
:param limit: optional param for maximum number of samples returned
:returns: list of samples
"""
key = self._make_profiler_key("ceilometer.list_samples", query,
limit)
with atomic.ActionTimer(self, key):
return self.clients("ceilometer").new_samples.list(q=query,
limit=limit)
@atomic.action_timer("ceilometer.get_resource")
def _get_resource(self, resource_id):
"""Retrieve details about one resource."""
return self.clients("ceilometer").resources.get(resource_id)
@atomic.action_timer("ceilometer.get_stats")
def _get_stats(self, meter_name, query=None, period=None, groupby=None,
aggregates=None):
"""Get stats for a specific meter.
:param meter_name: Name of ceilometer meter
:param query: list of queries
:param period: the length of the time range covered by these stats
:param groupby: the fields used to group the samples
:param aggregates: function for samples aggregation
:returns: list of statistics data
"""
return self.clients("ceilometer").statistics.list(meter_name, q=query,
period=period,
groupby=groupby,
aggregates=aggregates
)
@atomic.action_timer("ceilometer.create_meter")
def _create_meter(self, **kwargs):
"""Create a new meter.
:param kwargs: Contains the optional attributes for meter creation
:returns: Newly created meter
"""
name = self.generate_random_name()
samples = self.clients("ceilometer").samples.create(
counter_name=name, **kwargs)
return samples[0]
@atomic.action_timer("ceilometer.query_alarms")
def _query_alarms(self, filter, orderby, limit):
"""Query alarms with specific parameters.
If no input params are provided, it returns all the results
in the database.
:param limit: optional param for maximum number of results returned
:param orderby: optional param for specifying ordering of results
:param filter: optional filter query
:returns: queried alarms
"""
return self.clients("ceilometer").query_alarms.query(
filter, orderby, limit)
@atomic.action_timer("ceilometer.query_alarm_history")
def _query_alarm_history(self, filter, orderby, limit):
"""Query history of an alarm.
If no input params are provided, it returns all the results
in the database.
:param limit: optional param for maximum number of results returned
:param orderby: optional param for specifying ordering of results
:param filter: optional filter query
:returns: alarm history
"""
return self.clients("ceilometer").query_alarm_history.query(
filter, orderby, limit)
@atomic.action_timer("ceilometer.create_sample")
def _create_sample(self, counter_name, counter_type, counter_unit,
counter_volume, resource_id=None, **kwargs):
"""Create a Sample with specified parameters.
:param counter_name: specifies name of the counter
:param counter_type: specifies type of the counter
:param counter_unit: specifies unit of the counter
:param counter_volume: specifies volume of the counter
:param resource_id: specifies resource id for the sample created
:param kwargs: contains optional parameters for creating a sample
:returns: created sample
"""
kwargs.update({"counter_name": counter_name,
"counter_type": counter_type,
"counter_unit": counter_unit,
"counter_volume": counter_volume,
"resource_id": resource_id if resource_id
else self.generate_random_name()})
return self.clients("ceilometer").samples.create(**kwargs)
@atomic.action_timer("ceilometer.create_samples")
def _create_samples(self, samples):
"""Create Samples with specified parameters.
:param samples: a list of samples to create
:returns: created list of samples
"""
return self.clients("ceilometer").samples.create_list(samples)
@atomic.action_timer("ceilometer.query_samples")
def _query_samples(self, filter, orderby, limit):
"""Query samples with specified parameters.
If no input params are provided, it returns all the results
in the database.
:param limit: optional param for maximum number of results returned
:param orderby: optional param for specifying ordering of results
:param filter: optional filter query
:returns: queried samples
"""
return self.clients("ceilometer").query_samples.query(
filter, orderby, limit)
def _list_resources(self, query=None, limit=None):
"""List all resources.
:param query: query list for Ceilometer api
:param limit: count of returned resources
:returns: list of all resources
"""
key = self._make_profiler_key("ceilometer.list_resources", query,
limit)
with atomic.ActionTimer(self, key):
return self.clients("ceilometer").resources.list(q=query,
limit=limit)
def _list_meters(self, query=None, limit=None):
"""Get list of user's meters.
:param query: query list for Ceilometer api
:param limit: count of returned meters
:returns: list of all meters
"""
key = self._make_profiler_key("ceilometer.list_meters", query,
limit)
with atomic.ActionTimer(self, key):
return self.clients("ceilometer").meters.list(q=query,
limit=limit)
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for Interrogate."""
import socket
from grr.client.client_actions import admin
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact_test
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
# pylint: disable=unused-import
from grr.lib.flows.general import discovery
# pylint: enable=unused-import
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class DiscoveryTestEventListener(flow.EventListener):
"""A test listener to receive new client discoveries."""
well_known_session_id = rdfvalue.SessionID(flow_name="discovery_test")
EVENTS = ["Discovery"]
# For this test we just write the event as a class attribute.
event = None
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = message
DiscoveryTestEventListener.event = event
class TestClientInterrogate(artifact_test.ArtifactTest):
"""Test the interrogate flow."""
def _CheckUsers(self, all_users):
"""Check all user stores."""
summary = self.fd.GetSummary()
self.assertItemsEqual([x.username for x in summary.users], all_users)
users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]
self.assertItemsEqual(users, all_users)
self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)
# Check kb users
kbusers = [x.username for x in
self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]
self.assertItemsEqual(kbusers, all_users)
def _CheckAFF4Object(self, hostname, system, install_date):
self.assertEqual(self.fd.Get(self.fd.Schema.HOSTNAME), hostname)
self.assertEqual(self.fd.Get(self.fd.Schema.SYSTEM), system)
self.assertEqual(self.fd.Get(self.fd.Schema.INSTALL_DATE), install_date)
def _CheckClientInfo(self):
info = self.fd.Get(self.fd.Schema.CLIENT_INFO)
self.assertEqual(info.client_name, config_lib.CONFIG["Client.name"])
self.assertEqual(info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(info.build_time, config_lib.CONFIG["Client.build_time"])
def _CheckGRRConfig(self):
"""Check old and new client config."""
config_info = self.fd.Get(self.fd.Schema.GRR_CONFIGURATION)
self.assertEqual(config_info["Client.control_urls"],
["http://localhost:8001/control"])
self.assertEqual(config_info["Client.poll_min"], 1.0)
def _CheckClientIndex(self, host_pattern):
"""Check that the index has been updated."""
index_fd = aff4.FACTORY.Create(self.fd.Schema.client_index, "AFF4Index",
mode="r", token=self.token)
self.assertEqual(
[self.fd.urn],
[x for x in index_fd.Query([self.fd.Schema.HOSTNAME], host_pattern)])
def _CheckClientKwIndex(self, keywords, expected_count):
# Tests that the client index has expected_count results when
# searched for keywords.
index = aff4.FACTORY.Create(client_index.MAIN_INDEX,
aff4_type="ClientIndex",
mode="rw",
token=self.token)
self.assertEqual(len(index.LookupClients(keywords)),
expected_count)
def _CheckNotificationsCreated(self):
user_fd = aff4.FACTORY.Open("aff4:/users/test", token=self.token)
notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)
self.assertEqual(len(notifications), 1)
notification = notifications[0]
self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))
def _CheckClientSummary(self, osname, version, kernel="3.13.0-39-generic",
release="5"):
summary = self.fd.GetSummary()
self.assertEqual(summary.client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(summary.client_info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(summary.client_info.build_time,
config_lib.CONFIG["Client.build_time"])
self.assertEqual(summary.system_info.system, osname)
self.assertEqual(summary.system_info.node, "test_node")
self.assertEqual(summary.system_info.release, release)
self.assertEqual(summary.system_info.version, version)
self.assertEqual(summary.system_info.machine, "i386")
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(len(summary.interfaces), 1)
self.assertEqual(summary.interfaces[0].mac_address, "123456")
# Check that the client summary was published to the event listener.
self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)
self.assertEqual(
DiscoveryTestEventListener.event.interfaces[0].mac_address,
"123456")
def _CheckNetworkInfo(self):
net_fd = self.fd.OpenMember("network")
interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))
self.assertEqual(interfaces[0].mac_address, "123456")
self.assertEqual(interfaces[0].addresses[0].human_readable, "100.100.100.1")
self.assertEqual(socket.inet_ntop(
socket.AF_INET, interfaces[0].addresses[0].packed_bytes),
"100.100.100.1")
# Mac addresses should be available as hex for searching
mac_addresses = self.fd.Get(self.fd.Schema.MAC_ADDRESS)
self.assertTrue("123456".encode("hex") in str(mac_addresses))
# Same for IP addresses.
ip_addresses = self.fd.Get(self.fd.Schema.HOST_IPS)
self.assertTrue("100.100.100.1" in str(ip_addresses))
def _CheckVFS(self):
# Check that virtual directories exist for the mount points
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os/mnt/data"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("fs/tsk/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("devices/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
def _CheckLabelIndex(self):
"""Check that label indexes are updated."""
index = aff4.FACTORY.Create(
client_index.MAIN_INDEX, aff4_type="ClientIndex",
mode="rw", token=self.token)
self.assertEqual(
list(index.LookupClients(["label:Label2"])),
[self.client_id])
def _CheckWindowsDiskInfo(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdf_client.Volume))
self.assertTrue(result.windowsvolume.drive_letter in ["Z:", "C:"])
def _CheckRegistryPathspec(self):
# This tests that we can click refresh on a key in the registry vfs subtree
# even if we haven't downloaded any other key above it in the tree.
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(
"HKEY_LOCAL_MACHINE").Add("random/path/bla"), token=self.token)
pathspec = fd.real_pathspec
self.assertEqual(pathspec.pathtype, rdf_paths.PathSpec.PathType.REGISTRY)
self.assertEqual(pathspec.CollapsePath(),
u"/HKEY_LOCAL_MACHINE/random/path/bla")
def _CheckRelease(self, desired_release, desired_version):
# Test for correct Linux release override behaviour.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
release = str(client.Get(client.Schema.OS_RELEASE))
version = str(client.Get(client.Schema.OS_VERSION))
self.assertEqual(release, desired_release)
self.assertEqual(version, desired_version)
def _CheckClientLibraries(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
libs = client.Get(client.Schema.LIBRARY_VERSIONS)
self.assertTrue(libs is not None)
libs = libs.ToDict()
error_str = admin.GetLibraryVersions.error_str
# Strip off the exception itself.
error_str = error_str[:error_str.find("%s")]
for key in admin.GetLibraryVersions.library_map:
self.assertIn(key, libs)
self.assertFalse(libs[key].startswith(error_str))
def testInterrogateLinuxWithWtmp(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
test_lib.FakeTestDataVFSHandler):
with test_lib.ConfigOverrider(
{"Artifacts.knowledge_base": ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxRelease"],
"Artifacts.netgroup_filter_regexes": [r"^login$"]}):
self.SetLinuxClient()
client_mock = action_mocks.InterrogatedClient(
"TransferBuffer", "StatFile", "Find", "HashBuffer",
"ListDirectory", "FingerprintFile", "GetLibraryVersions",
"HashFile")
client_mock.InitializeClient()
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Linux", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*test.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Linux", "14.4", release="Ubuntu",
kernel="3.13.0-39-generic")
self._CheckRelease("Ubuntu", "14.4")
# users 1,2,3 from wtmp
# users yagharek, isaac from netgroup
self._CheckUsers(["yagharek", "isaac", "user1", "user2", "user3"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckClientKwIndex(["Linux"], 1)
self._CheckClientKwIndex(["Label2"], 1)
self._CheckClientLibraries()
def testInterrogateWindows(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.REGISTRY, test_lib.FakeRegistryVFSHandler):
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, test_lib.FakeFullVFSHandler):
client_mock = action_mocks.InterrogatedClient(
"TransferBuffer", "StatFile", "Find", "HashBuffer",
"ListDirectory", "FingerprintFile", "GetLibraryVersions")
self.SetWindowsClient()
client_mock.InitializeClient(system="Windows", version="6.1.7600",
kernel="6.1.7601")
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Windows", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*Host.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Windows", "6.1.7600", kernel="6.1.7601")
# users Bert and Ernie added by the fixture should not be present (USERS
    # overridden by kb)
# jim parsed from registry profile keys
self._CheckUsers(["jim", "kovacs"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckWindowsDiskInfo()
self._CheckRegistryPathspec()
self._CheckClientKwIndex(["Linux"], 0)
self._CheckClientKwIndex(["Windows"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
from unittest import TestCase, main
from uritemplate import URITemplate, expand, partial
from uritemplate import variable
def merge_dicts(*args):
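    """Return a new dict combining the keys of all given dicts."""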
d = {}
for arg in args:
d.update(arg)
return d
class RFCTemplateExamples(type):
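    """Metaclass carrying the RFC 6570 example tables used to generate tests.
    Each `level*` dictionary below maps a URI template to the variables used
    for expansion ('expansion') and the expected expanded string ('expected').
    """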
var = {'var': 'value'}
hello = {'hello': 'Hello World!'}
path = {'path': '/foo/bar'}
x = {'x': '1024'}
y = {'y': '768'}
empty = {'empty': ''}
merged_x_y = merge_dicts(x, y)
list_ex = {'list': ['red', 'green', 'blue']}
keys = {'keys': [('semi', ';'), ('dot', '.'), ('comma', ',')]}
# # Level 1
# Simple string expansion
level1_examples = {
'{var}': {
'expansion': var,
'expected': 'value',
},
'{hello}': {
'expansion': hello,
'expected': 'Hello%20World%21',
},
}
# # Level 2
# Reserved string expansion
level2_reserved_examples = {
'{+var}': {
'expansion': var,
'expected': 'value',
},
'{+hello}': {
'expansion': hello,
'expected': 'Hello%20World!',
},
'{+path}/here': {
'expansion': path,
'expected': '/foo/bar/here',
},
'here?ref={+path}': {
'expansion': path,
'expected': 'here?ref=/foo/bar',
},
}
# Fragment expansion, crosshatch-prefixed
level2_fragment_examples = {
'X{#var}': {
'expansion': var,
'expected': 'X#value',
},
'X{#hello}': {
'expansion': hello,
'expected': 'X#Hello%20World!'
},
}
# # Level 3
# String expansion with multiple variables
level3_multiple_variable_examples = {
'map?{x,y}': {
'expansion': merged_x_y,
'expected': 'map?1024,768',
},
'{x,hello,y}': {
'expansion': merge_dicts(x, y, hello),
'expected': '1024,Hello%20World%21,768',
},
}
# Reserved expansion with multiple variables
level3_reserved_examples = {
'{+x,hello,y}': {
'expansion': merge_dicts(x, y, hello),
'expected': '1024,Hello%20World!,768',
},
'{+path,x}/here': {
'expansion': merge_dicts(path, x),
'expected': '/foo/bar,1024/here',
},
}
# Fragment expansion with multiple variables
level3_fragment_examples = {
'{#x,hello,y}': {
'expansion': merge_dicts(x, y, hello),
'expected': '#1024,Hello%20World!,768',
},
'{#path,x}/here': {
'expansion': merge_dicts(path, x),
'expected': '#/foo/bar,1024/here'
},
}
# Label expansion, dot-prefixed
level3_label_examples = {
'X{.var}': {
'expansion': var,
'expected': 'X.value',
},
'X{.x,y}': {
'expansion': merged_x_y,
'expected': 'X.1024.768',
}
}
# Path segments, slash-prefixed
level3_path_segment_examples = {
'{/var}': {
'expansion': var,
'expected': '/value',
},
'{/var,x}/here': {
'expansion': merge_dicts(var, x),
'expected': '/value/1024/here',
},
}
# Path-style parameters, semicolon-prefixed
level3_path_semi_examples = {
'{;x,y}': {
'expansion': merged_x_y,
'expected': ';x=1024;y=768',
},
'{;x,y,empty}': {
'expansion': merge_dicts(x, y, empty),
'expected': ';x=1024;y=768;empty',
},
}
# Form-style query, ampersand-separated
level3_form_amp_examples = {
'{?x,y}': {
'expansion': merged_x_y,
'expected': '?x=1024&y=768',
},
'{?x,y,empty}': {
'expansion': merge_dicts(x, y, empty),
'expected': '?x=1024&y=768&empty=',
},
}
# Form-style query continuation
level3_form_cont_examples = {
'?fixed=yes{&x}': {
'expansion': x,
'expected': '?fixed=yes&x=1024',
},
'{&x,y,empty}': {
'expansion': merge_dicts(x, y, empty),
'expected': '&x=1024&y=768&empty=',
}
}
# # Level 4
# String expansion with value modifiers
level4_value_modifier_examples = {
'{var:3}': {
'expansion': var,
'expected': 'val',
},
'{var:30}': {
'expansion': var,
'expected': 'value',
},
'{list}': {
'expansion': list_ex,
'expected': 'red,green,blue',
},
'{list*}': {
'expansion': list_ex,
'expected': 'red,green,blue',
},
'{keys}': {
'expansion': keys,
'expected': 'semi,%3B,dot,.,comma,%2C',
},
'{keys*}': {
'expansion': keys,
'expected': 'semi=%3B,dot=.,comma=%2C',
},
}
# Reserved expansion with value modifiers
level4_reserved_examples = {
'{+path:6}/here': {
'expansion': path,
'expected': '/foo/b/here',
},
'{+list}': {
'expansion': list_ex,
'expected': 'red,green,blue',
},
'{+list*}': {
'expansion': list_ex,
'expected': 'red,green,blue',
},
'{+keys}': {
'expansion': keys,
'expected': 'semi,;,dot,.,comma,,',
},
'{+keys*}': {
'expansion': keys,
'expected': 'semi=;,dot=.,comma=,',
},
}
# Fragment expansion with value modifiers
level4_fragment_examples = {
'{#path:6}/here': {
'expansion': path,
'expected': '#/foo/b/here',
},
'{#list}': {
'expansion': list_ex,
'expected': '#red,green,blue',
},
'{#list*}': {
'expansion': list_ex,
'expected': '#red,green,blue',
},
'{#keys}': {
'expansion': keys,
'expected': '#semi,;,dot,.,comma,,'
},
'{#keys*}': {
'expansion': keys,
'expected': '#semi=;,dot=.,comma=,'
},
}
# Label expansion, dot-prefixed
level4_label_examples = {
'X{.var:3}': {
'expansion': var,
'expected': 'X.val',
},
'X{.list}': {
'expansion': list_ex,
'expected': 'X.red,green,blue',
},
'X{.list*}': {
'expansion': list_ex,
'expected': 'X.red.green.blue',
},
'X{.keys}': {
'expansion': keys,
'expected': 'X.semi,%3B,dot,.,comma,%2C',
},
'X{.keys*}': {
'expansion': keys,
'expected': 'X.semi=%3B.dot=..comma=%2C',
},
}
# Path segments, slash-prefixed
level4_path_slash_examples = {
'{/var:1,var}': {
'expansion': var,
'expected': '/v/value',
},
'{/list}': {
'expansion': list_ex,
'expected': '/red,green,blue',
},
'{/list*}': {
'expansion': list_ex,
'expected': '/red/green/blue',
},
'{/list*,path:4}': {
'expansion': merge_dicts(list_ex, path),
'expected': '/red/green/blue/%2Ffoo',
},
'{/keys}': {
'expansion': keys,
'expected': '/semi,%3B,dot,.,comma,%2C',
},
'{/keys*}': {
'expansion': keys,
'expected': '/semi=%3B/dot=./comma=%2C',
},
}
# Path-style parameters, semicolon-prefixed
level4_path_semi_examples = {
'{;hello:5}': {
'expansion': hello,
'expected': ';hello=Hello',
},
'{;list}': {
'expansion': list_ex,
'expected': ';list=red,green,blue',
},
'{;list*}': {
'expansion': list_ex,
'expected': ';list=red;list=green;list=blue',
},
'{;keys}': {
'expansion': keys,
'expected': ';keys=semi,%3B,dot,.,comma,%2C',
},
'{;keys*}': {
'expansion': keys,
'expected': ';semi=%3B;dot=.;comma=%2C',
},
}
# Form-style query, ampersand-separated
level4_form_amp_examples = {
'{?var:3}': {
'expansion': var,
'expected': '?var=val',
},
'{?list}': {
'expansion': list_ex,
'expected': '?list=red,green,blue',
},
'{?list*}': {
'expansion': list_ex,
'expected': '?list=red&list=green&list=blue',
},
'{?keys}': {
'expansion': keys,
'expected': '?keys=semi,%3B,dot,.,comma,%2C',
},
'{?keys*}': {
'expansion': keys,
'expected': '?semi=%3B&dot=.&comma=%2C',
},
}
# Form-style query continuation
level4_form_query_examples = {
'{&var:3}': {
'expansion': var,
'expected': '&var=val',
},
'{&list}': {
'expansion': list_ex,
'expected': '&list=red,green,blue',
},
'{&list*}': {
'expansion': list_ex,
'expected': '&list=red&list=green&list=blue',
},
'{&keys}': {
'expansion': keys,
'expected': '&keys=semi,%3B,dot,.,comma,%2C',
},
'{&keys*}': {
'expansion': keys,
'expected': '&semi=%3B&dot=.&comma=%2C',
},
}
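    # Build a test_<table name> method for every `level*` dictionary above;
    # each generated test expands the templates in the table and compares the
    # results with the expected strings.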
def __new__(cls, name, bases, attrs):
def make_test(d):
def _test_(self):
for k, v in d.items():
t = URITemplate(k)
self.assertEqual(t.expand(v['expansion']), v['expected'])
return _test_
examples = [
(
n, getattr(RFCTemplateExamples, n)
) for n in dir(RFCTemplateExamples) if n.startswith('level')
]
        # Use a distinct loop variable so we do not shadow the `name` argument
        # used to construct the class below.
        for example_name, example in examples:
            testname = 'test_%s' % example_name
            attrs[testname] = make_test(example)
        return type.__new__(cls, name, bases, attrs)
class TestURITemplate(RFCTemplateExamples('RFCMeta', (TestCase,), {})):
def test_no_variables_in_uri(self):
"""
This test ensures that if there are no variables present, the
template evaluates to itself.
"""
uri = 'https://api.github.com/users'
t = URITemplate(uri)
self.assertEqual(t.expand(), uri)
self.assertEqual(t.expand(users='foo'), uri)
def test_all_variables_parsed(self):
"""
This test ensures that all variables are parsed.
"""
uris = [
'https://api.github.com',
'https://api.github.com/users{/user}',
'https://api.github.com/repos{/user}{/repo}',
'https://api.github.com/repos{/user}{/repo}/issues{/issue}'
]
for i, uri in enumerate(uris):
t = URITemplate(uri)
self.assertEqual(len(t.variables), i)
def test_expand(self):
"""
This test ensures that expansion works as expected.
"""
# Single
t = URITemplate('https://api.github.com/users{/user}')
expanded = 'https://api.github.com/users/sigmavirus24'
self.assertEqual(t.expand(user='sigmavirus24'), expanded)
v = t.variables[0]
self.assertEqual(v.expand({'user': None}), {'/user': ''})
# Multiple
t = URITemplate('https://api.github.com/users{/user}{/repo}')
expanded = 'https://api.github.com/users/sigmavirus24/github3.py'
self.assertEqual(
t.expand({'repo': 'github3.py'}, user='sigmavirus24'),
expanded
)
def test_str_repr(self):
uri = 'https://api.github.com{/endpoint}'
t = URITemplate(uri)
self.assertEqual(str(t), uri)
self.assertEqual(str(t.variables[0]), '/endpoint')
self.assertEqual(repr(t), 'URITemplate("%s")' % uri)
self.assertEqual(repr(t.variables[0]), 'URIVariable(/endpoint)')
def test_hash(self):
uri = 'https://api.github.com{/endpoint}'
self.assertEqual(hash(URITemplate(uri)), hash(uri))
def test_default_value(self):
uri = 'https://api.github.com/user{/user=sigmavirus24}'
t = URITemplate(uri)
self.assertEqual(t.expand(),
'https://api.github.com/user/sigmavirus24')
self.assertEqual(t.expand(user='lukasa'),
'https://api.github.com/user/lukasa')
def test_query_expansion(self):
t = URITemplate('{foo}')
self.assertEqual(
t.variables[0]._query_expansion('foo', None, False, False), None
)
def test_label_path_expansion(self):
t = URITemplate('{foo}')
self.assertEqual(
t.variables[0]._label_path_expansion('foo', None, False, False),
None
)
def test_semi_path_expansion(self):
t = URITemplate('{foo}')
v = t.variables[0]
self.assertEqual(
v._semi_path_expansion('foo', None, False, False),
None
)
t.variables[0].operator = '?'
self.assertEqual(
v._semi_path_expansion('foo', ['bar', 'bogus'], True, False),
'foo=bar&foo=bogus'
)
def test_string_expansion(self):
t = URITemplate('{foo}')
self.assertEqual(
t.variables[0]._string_expansion('foo', None, False, False),
None
)
def test_hashability(self):
t = URITemplate('{foo}')
u = URITemplate('{foo}')
d = {t: 1}
d[u] += 1
self.assertEqual(d, {t: 2})
def test_no_mutate(self):
args = {}
t = URITemplate('')
t.expand(args, key=1)
self.assertEqual(args, {})
class TestURIVariable(TestCase):
def setUp(self):
self.v = variable.URIVariable('{foo}')
def test_post_parse(self):
v = self.v
self.assertEqual(v.join_str, ',')
self.assertEqual(v.operator, '')
self.assertEqual(v.safe, '')
self.assertEqual(v.start, '')
def test_post_parse_plus(self):
v = self.v
v.operator = '+'
v.post_parse()
self.assertEqual(v.join_str, ',')
self.assertEqual(v.safe, variable.URIVariable.reserved)
self.assertEqual(v.start, '')
def test_post_parse_octothorpe(self):
v = self.v
v.operator = '#'
v.post_parse()
self.assertEqual(v.join_str, ',')
self.assertEqual(v.safe, variable.URIVariable.reserved)
self.assertEqual(v.start, '#')
def test_post_parse_question(self):
v = self.v
v.operator = '?'
v.post_parse()
self.assertEqual(v.join_str, '&')
self.assertEqual(v.safe, '')
self.assertEqual(v.start, '?')
def test_post_parse_ampersand(self):
v = self.v
v.operator = '&'
v.post_parse()
self.assertEqual(v.join_str, '&')
self.assertEqual(v.safe, '')
self.assertEqual(v.start, '&')
class TestVariableModule(TestCase):
def test_is_list_of_tuples(self):
l = [(1, 2), (3, 4)]
self.assertEqual(variable.is_list_of_tuples(l), (True, l))
l = [1, 2, 3, 4]
self.assertEqual(variable.is_list_of_tuples(l), (False, None))
def test_list_test(self):
l = [1, 2, 3, 4]
self.assertEqual(variable.list_test(l), True)
l = str([1, 2, 3, 4])
self.assertEqual(variable.list_test(l), False)
def test_list_of_tuples_test(self):
l = [(1, 2), (3, 4)]
self.assertEqual(variable.dict_test(l), False)
d = dict(l)
self.assertEqual(variable.dict_test(d), True)
class TestAPI(TestCase):
uri = 'https://api.github.com{/endpoint}'
def test_expand(self):
self.assertEqual(expand(self.uri, {'endpoint': 'users'}),
'https://api.github.com/users')
def test_partial(self):
self.assertEqual(partial(self.uri), URITemplate(self.uri))
uri = self.uri + '/sigmavirus24{/other}'
self.assertEqual(
partial(uri, endpoint='users'),
URITemplate('https://api.github.com/users/sigmavirus24{/other}')
)
if __name__ == '__main__':
main()
|
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Dealer',)
from autobahn import util
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import message
from autobahn.wamp.exception import ProtocolError, ApplicationError
from autobahn.wamp.interfaces import IDealer, IRouter
from autobahn.wamp.message import _URI_PAT_STRICT_NON_EMPTY, _URI_PAT_LOOSE_NON_EMPTY
class Dealer:
"""
Basic WAMP dealer. This class implements :class:`autobahn.wamp.interfaces.IDealer`.
"""
def __init__(self, router, options):
"""
:param router: The router this dealer is part of.
:type router: Object that implements :class:`autobahn.wamp.interfaces.IRouter`.
:param options: Router options.
:type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
"""
self._router = router
self._options = options or types.RouterOptions()
## map: session -> set(registration)
## needed for removeSession
self._session_to_registrations = {}
## map: session_id -> session
## needed for exclude/eligible
self._session_id_to_session = {}
      ## map: procedure -> (registration_id, session, discloseCaller)
self._procs_to_regs = {}
## map: registration -> procedure
self._regs_to_procs = {}
## pending callee invocation requests
self._invocations = {}
## check all procedure URIs with strict rules
self._option_uri_strict = self._options.uri_check == types.RouterOptions.URI_CHECK_STRICT
## supported features from "WAMP Advanced Profile"
self._role_features = role.RoleDealerFeatures(caller_identification = True, progressive_call_results = True)
def attach(self, session):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.attach`
"""
assert(session not in self._session_to_registrations)
self._session_to_registrations[session] = set()
self._session_id_to_session[session._session_id] = session
def detach(self, session):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.detach`
"""
assert(session in self._session_to_registrations)
for registration in self._session_to_registrations[session]:
del self._procs_to_regs[self._regs_to_procs[registration]]
del self._regs_to_procs[registration]
del self._session_to_registrations[session]
del self._session_id_to_session[session._session_id]
def processRegister(self, session, register):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.processRegister`
"""
assert(session in self._session_to_registrations)
## check procedure URI
##
if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(register.procedure)) or \
( self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(register.procedure)):
reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.INVALID_URI, ["register for invalid procedure URI '{}'".format(register.procedure)])
session._transport.send(reply)
else:
if not register.procedure in self._procs_to_regs:
## authorize action
##
d = self._as_future(self._router.authorize, session, register.procedure, IRouter.ACTION_REGISTER)
def on_authorize_success(authorized):
if authorized:
registration_id = util.id()
self._procs_to_regs[register.procedure] = (registration_id, session, register.discloseCaller)
self._regs_to_procs[registration_id] = register.procedure
self._session_to_registrations[session].add(registration_id)
reply = message.Registered(register.request, registration_id)
else:
reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.NOT_AUTHORIZED, ["session is not authorized to register procedure '{}'".format(register.procedure)])
session._transport.send(reply)
def on_authorize_error(err):
reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.AUTHORIZATION_FAILED, ["failed to authorize session for registering procedure '{}': {}".format(register.procedure, err.value)])
session._transport.send(reply)
self._add_future_callbacks(d, on_authorize_success, on_authorize_error)
else:
reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.PROCEDURE_ALREADY_EXISTS, ["register for already registered procedure '{}'".format(register.procedure)])
session._transport.send(reply)
def processUnregister(self, session, unregister):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.processUnregister`
"""
assert(session in self._session_to_registrations)
if unregister.registration in self._regs_to_procs:
## map registration ID to procedure URI
procedure = self._regs_to_procs[unregister.registration]
## get the session that originally registered the procedure
_, reg_session, _ = self._procs_to_regs[procedure]
if session != reg_session:
## procedure was registered by a different session!
##
reply = message.Error(message.Unregister.MESSAGE_TYPE, unregister.request, ApplicationError.NO_SUCH_REGISTRATION)
else:
## alright. the procedure had been registered by the session
## that now wants to unregister it.
##
del self._procs_to_regs[procedure]
del self._regs_to_procs[unregister.registration]
self._session_to_registrations[session].discard(unregister.registration)
reply = message.Unregistered(unregister.request)
else:
reply = message.Error(message.Unregister.MESSAGE_TYPE, unregister.request, ApplicationError.NO_SUCH_REGISTRATION)
session._transport.send(reply)
def processCall(self, session, call):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.processCall`
"""
assert(session in self._session_to_registrations)
## check procedure URI
##
if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(call.procedure)) or \
( self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(call.procedure)):
reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.INVALID_URI, ["call with invalid procedure URI '{}'".format(call.procedure)])
session._transport.send(reply)
else:
if call.procedure in self._procs_to_regs:
## validate payload
##
try:
self._router.validate('call', call.procedure, call.args, call.kwargs)
except Exception as e:
reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.INVALID_ARGUMENT, ["call of procedure '{}' with invalid application payload: {}".format(call.procedure, e)])
session._transport.send(reply)
return
## authorize action
##
d = self._as_future(self._router.authorize, session, call.procedure, IRouter.ACTION_CALL)
def on_authorize_success(authorized):
if authorized:
registration_id, endpoint_session, discloseCaller = self._procs_to_regs[call.procedure]
request_id = util.id()
if discloseCaller or call.discloseMe:
caller = session._session_id
authid = session._authid
authrole = session._authrole
authmethod = session._authmethod
else:
caller = None
authid = None
authrole = None
authmethod = None
invocation = message.Invocation(request_id,
registration_id,
args = call.args,
kwargs = call.kwargs,
timeout = call.timeout,
receive_progress = call.receive_progress,
caller = caller,
authid = authid,
authrole = authrole,
authmethod = authmethod)
self._invocations[request_id] = (call, session)
endpoint_session._transport.send(invocation)
else:
reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.NOT_AUTHORIZED, ["session is not authorized to call procedure '{}'".format(call.procedure)])
session._transport.send(reply)
def on_authorize_error(err):
reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.AUTHORIZATION_FAILED, ["failed to authorize session for calling procedure '{}': {}".format(call.procedure, err.value)])
session._transport.send(reply)
self._add_future_callbacks(d, on_authorize_success, on_authorize_error)
else:
reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.NO_SUCH_PROCEDURE, ["no procedure '{}' registered".format(call.procedure)])
session._transport.send(reply)
def processCancel(self, session, cancel):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.processCancel`
"""
assert(session in self._session_to_registrations)
raise Exception("not implemented")
def processYield(self, session, yield_):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.processYield`
"""
assert(session in self._session_to_registrations)
if yield_.request in self._invocations:
## get original call message and calling session
##
call_msg, call_session = self._invocations[yield_.request]
## validate payload
##
is_valid = True
try:
self._router.validate('call_result', call_msg.procedure, yield_.args, yield_.kwargs)
except Exception as e:
is_valid = False
reply = message.Error(message.Call.MESSAGE_TYPE, call_msg.request, ApplicationError.INVALID_ARGUMENT, ["call result from procedure '{}' with invalid application payload: {}".format(call_msg.procedure, e)])
else:
reply = message.Result(call_msg.request, args = yield_.args, kwargs = yield_.kwargs, progress = yield_.progress)
## the calling session might have been lost in the meantime ..
##
if call_session._transport:
call_session._transport.send(reply)
## the call is done if it's a regular call (non-progressive) or if the payload was invalid
##
if not yield_.progress or not is_valid:
del self._invocations[yield_.request]
else:
raise ProtocolError("Dealer.onYield(): YIELD received for non-pending request ID {}".format(yield_.request))
def processInvocationError(self, session, error):
"""
Implements :func:`autobahn.wamp.interfaces.IDealer.processInvocationError`
"""
assert(session in self._session_to_registrations)
if error.request in self._invocations:
## get original call message and calling session
##
call_msg, call_session = self._invocations[error.request]
## validate payload
##
try:
self._router.validate('call_error', call_msg.procedure, error.args, error.kwargs)
except Exception as e:
reply = message.Error(message.Call.MESSAGE_TYPE, call_msg.request, ApplicationError.INVALID_ARGUMENT, ["call error from procedure '{}' with invalid application payload: {}".format(call_msg.procedure, e)])
else:
reply = message.Error(message.Call.MESSAGE_TYPE, call_msg.request, error.error, args = error.args, kwargs = error.kwargs)
## the calling session might have been lost in the meantime ..
##
if call_session._transport:
call_session._transport.send(reply)
## the call is done
##
del self._invocations[error.request]
else:
raise ProtocolError("Dealer.onInvocationError(): ERROR received for non-pending request_type {} and request ID {}".format(error.request_type, error.request))
IDealer.register(Dealer)
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for file sinks."""
from __future__ import absolute_import
import glob
import logging
import os
import shutil
import sys
import tempfile
import unittest
from builtins import range
import hamcrest as hc
import mock
import apache_beam as beam
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io.filesystem import BeamIOError
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# TODO: Refactor code so all io tests use the same shared
# TestCaseWithTempDirCleanup class.
class _TestCaseWithTempDirCleanUp(unittest.TestCase):
"""Base class for TestCases that deals with TempDir clean-up.
Inherited test cases will call self._new_tempdir() to start a temporary dir
which will be deleted at the end of the tests (when tearDown() is called).
"""
def setUp(self):
self._tempdirs = []
def tearDown(self):
for path in self._tempdirs:
if os.path.exists(path):
shutil.rmtree(path)
self._tempdirs = []
def _new_tempdir(self):
result = tempfile.mkdtemp()
self._tempdirs.append(result)
return result
def _create_temp_file(self, name='', suffix='', dir=None, content=None):
if not name:
name = tempfile.template
if not dir:
dir = self._new_tempdir()
file_name = tempfile.NamedTemporaryFile(
delete=False, prefix=name,
dir=dir, suffix=suffix).name
if content:
with open(file_name, 'w') as f:
f.write(content)
return file_name
class MyFileBasedSink(filebasedsink.FileBasedSink):
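  """Minimal sink used by these tests: it brackets the file contents with
  [start]/[end] markers and wraps every encoded record in square brackets,
  so finalized shard contents are easy to assert on."""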
def open(self, temp_path):
# TODO: Fix main session pickling.
# file_handle = super(MyFileBasedSink, self).open(temp_path)
file_handle = filebasedsink.FileBasedSink.open(self, temp_path)
file_handle.write(b'[start]')
return file_handle
def write_encoded_record(self, file_handle, encoded_value):
file_handle.write(b'[')
file_handle.write(encoded_value)
file_handle.write(b']')
def close(self, file_handle):
file_handle.write(b'[end]')
# TODO: Fix main session pickling.
# file_handle = super(MyFileBasedSink, self).close(file_handle)
file_handle = filebasedsink.FileBasedSink.close(self, file_handle)
class TestFileBasedSink(_TestCaseWithTempDirCleanUp):
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def _common_init(self, sink):
# Manually invoke the generic Sink API.
init_token = sink.initialize_write()
writer1 = sink.open_writer(init_token, '1')
writer1.write('a')
writer1.write('b')
res1 = writer1.close()
writer2 = sink.open_writer(init_token, '2')
writer2.write('x')
writer2.write('y')
writer2.write('z')
res2 = writer2.close()
return init_token, [res1, res2]
def test_file_sink_writing(self):
temp_path = os.path.join(self._new_tempdir(), 'FileBasedSink')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
init_token, writer_results = self._common_init(sink)
pre_finalize_results = sink.pre_finalize(init_token, writer_results)
finalize_res1 = list(sink.finalize_write(init_token, writer_results,
pre_finalize_results))
# Retry the finalize operation (as if the first attempt was lost).
finalize_res2 = list(sink.finalize_write(init_token, writer_results,
pre_finalize_results))
# Check the results.
shard1 = temp_path + '-00000-of-00002.output'
shard2 = temp_path + '-00001-of-00002.output'
self.assertEqual(finalize_res1, [shard1, shard2])
self.assertEqual(finalize_res2, [])
self.assertEqual(open(shard1).read(), '[start][a][b][end]')
self.assertEqual(open(shard2).read(), '[start][x][y][z][end]')
# Check that any temp files are deleted.
self.assertCountEqual([shard1, shard2], glob.glob(temp_path + '*'))
def test_file_sink_display_data(self):
temp_path = os.path.join(self._new_tempdir(), 'display')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher(
'compression', 'auto'),
DisplayDataItemMatcher(
'file_pattern',
'{}{}'.format(
temp_path,
'-%(shard_num)05d-of-%(num_shards)05d.output'))]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_empty_write(self):
temp_path = tempfile.NamedTemporaryFile().name
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder()
)
with TestPipeline() as p:
p | beam.Create([]) | beam.io.Write(sink) # pylint: disable=expression-not-assigned
self.assertEqual(
open(temp_path + '-00000-of-00001.output').read(), '[start][end]')
def test_static_value_provider_empty_write(self):
temp_path = StaticValueProvider(value_type=str,
value=tempfile.NamedTemporaryFile().name)
sink = MyFileBasedSink(
temp_path,
file_name_suffix=StaticValueProvider(value_type=str, value='.output'),
coder=coders.ToStringCoder()
)
with TestPipeline() as p:
p | beam.Create([]) | beam.io.Write(sink) # pylint: disable=expression-not-assigned
self.assertEqual(
open(temp_path.get() + '-00000-of-00001.output').read(), '[start][end]')
def test_fixed_shard_write(self):
temp_path = os.path.join(self._new_tempdir(), 'empty')
sink = MyFileBasedSink(
temp_path,
file_name_suffix='.output',
num_shards=3,
shard_name_template='_NN_SSS_',
coder=coders.ToStringCoder())
with TestPipeline() as p:
p | beam.Create(['a', 'b']) | beam.io.Write(sink) # pylint: disable=expression-not-assigned
concat = ''.join(
open(temp_path + '_03_%03d_.output' % shard_num).read()
for shard_num in range(3))
self.assertTrue('][a][' in concat, concat)
self.assertTrue('][b][' in concat, concat)
# Not using 'test' in name so that 'nose' doesn't pick this as a test.
def run_temp_dir_check(self, no_dir_path, dir_path, no_dir_root_path,
dir_root_path, prefix, separator):
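    # Checks that initialize_write() creates a temporary directory whose last
    # path component starts with 'beam-temp', and that output prefixes
    # pointing at the filesystem root are rejected with ValueError.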
def _get_temp_dir(file_path_prefix):
sink = MyFileBasedSink(
file_path_prefix, file_name_suffix='.output',
coder=coders.ToStringCoder())
return sink.initialize_write()
temp_dir = _get_temp_dir(no_dir_path)
self.assertTrue(temp_dir.startswith(prefix))
last_sep = temp_dir.rfind(separator)
self.assertTrue(temp_dir[last_sep + 1:].startswith('beam-temp'))
temp_dir = _get_temp_dir(dir_path)
self.assertTrue(temp_dir.startswith(prefix))
last_sep = temp_dir.rfind(separator)
self.assertTrue(temp_dir[last_sep + 1:].startswith('beam-temp'))
with self.assertRaises(ValueError):
_get_temp_dir(no_dir_root_path)
with self.assertRaises(ValueError):
_get_temp_dir(dir_root_path)
def test_temp_dir_uniqueness(self):
temp_path = os.path.join(self._new_tempdir(), 'unique')
sink = MyFileBasedSink(temp_path, coder=coders.ToStringCoder())
init_list = [''] * 1000
temp_dir_list = [sink._create_temp_dir(temp_path) for _ in init_list]
temp_dir_set = set(temp_dir_list)
self.assertEqual(len(temp_dir_list), len(temp_dir_set))
def test_temp_dir_gcs(self):
try:
self.run_temp_dir_check(
'gs://aaa/bbb', 'gs://aaa/bbb/', 'gs://aaa', 'gs://aaa/', 'gs://',
'/')
except ValueError:
logging.debug('Ignoring test since GCP module is not installed')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_temp_dir_local(self, filesystem_os_mock):
    # Here we test against a Unix-like mock file system
    # (we are not really testing Unix or Windows behaviour, since we mock the
    # relevant functions of the 'os' module).
def _fake_unix_split(path):
sep = path.rfind('/')
if sep < 0:
raise ValueError('Path must contain a separator')
return (path[:sep], path[sep + 1:])
def _fake_unix_join(base, path):
return base + '/' + path
filesystem_os_mock.path.abspath = lambda a: a
filesystem_os_mock.path.split.side_effect = _fake_unix_split
filesystem_os_mock.path.join.side_effect = _fake_unix_join
self.run_temp_dir_check(
'/aaa/bbb', '/aaa/bbb/', '/', '/', '/', '/')
def test_file_sink_multi_shards(self):
temp_path = os.path.join(self._new_tempdir(), 'multishard')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
# Manually invoke the generic Sink API.
init_token = sink.initialize_write()
num_shards = 1000
writer_results = []
for i in range(num_shards):
uuid = 'uuid-%05d' % i
writer = sink.open_writer(init_token, uuid)
writer.write('a')
writer.write('b')
writer.write(uuid)
writer_results.append(writer.close())
pre_finalize_results = sink.pre_finalize(init_token, writer_results)
res = sorted(sink.finalize_write(init_token, writer_results,
pre_finalize_results))
for i in range(num_shards):
shard_name = '%s-%05d-of-%05d.output' % (temp_path, i, num_shards)
uuid = 'uuid-%05d' % i
self.assertEqual(res[i], shard_name)
self.assertEqual(
open(shard_name).read(), ('[start][a][b][%s][end]' % uuid))
# Check that any temp files are deleted.
self.assertCountEqual(res, glob.glob(temp_path + '*'))
@mock.patch.object(filebasedsink.FileSystems, 'rename')
def test_file_sink_rename_error(self, rename_mock):
temp_path = os.path.join(self._new_tempdir(), 'rename_error')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
init_token, writer_results = self._common_init(sink)
pre_finalize_results = sink.pre_finalize(init_token, writer_results)
error_str = 'mock rename error description'
rename_mock.side_effect = BeamIOError(
'mock rename error', {('src', 'dst'): error_str})
with self.assertRaisesRegexp(Exception, error_str):
list(sink.finalize_write(init_token, writer_results,
pre_finalize_results))
def test_file_sink_src_missing(self):
temp_path = os.path.join(self._new_tempdir(), 'src_missing')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
init_token, writer_results = self._common_init(sink)
pre_finalize_results = sink.pre_finalize(init_token, writer_results)
os.remove(writer_results[0])
with self.assertRaisesRegexp(Exception, r'not exist'):
list(sink.finalize_write(init_token, writer_results,
pre_finalize_results))
def test_file_sink_dst_matches_src(self):
temp_path = os.path.join(self._new_tempdir(), 'dst_matches_src')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
init_token, [res1, res2] = self._common_init(sink)
pre_finalize_results = sink.pre_finalize(init_token, [res1, res2])
list(sink.finalize_write(init_token, [res1, res2], pre_finalize_results))
self.assertFalse(os.path.exists(res1))
self.assertFalse(os.path.exists(res2))
shard1 = temp_path + '-00000-of-00002.output'
shard2 = temp_path + '-00001-of-00002.output'
self.assertEqual(open(shard1).read(), '[start][a][b][end]')
self.assertEqual(open(shard2).read(), '[start][x][y][z][end]')
os.makedirs(os.path.dirname(res1))
shutil.copyfile(shard1, res1)
shutil.copyfile(shard2, res2)
list(sink.finalize_write(init_token, [res1, res2], pre_finalize_results))
def test_pre_finalize(self):
temp_path = os.path.join(self._new_tempdir(), 'pre_finalize')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
init_token, [res1, res2] = self._common_init(sink)
# no-op
sink.pre_finalize(init_token, [res1, res2])
# Create finalized outputs from a previous run, which pre_finalize should
# delete.
shard1 = temp_path + '-00000-of-00002.output'
shard2 = temp_path + '-00001-of-00002.output'
with open(shard1, 'w') as f:
f.write('foo')
with open(shard2, 'w') as f:
f.write('foo')
self.assertTrue(os.path.exists(res1))
self.assertTrue(os.path.exists(res2))
self.assertTrue(os.path.exists(shard1))
self.assertTrue(os.path.exists(shard2))
sink.pre_finalize(init_token, [res1, res2])
self.assertTrue(os.path.exists(res1))
self.assertTrue(os.path.exists(res2))
self.assertFalse(os.path.exists(shard1))
self.assertFalse(os.path.exists(shard2))
@mock.patch.object(filebasedsink.FileSystems, 'delete')
def test_pre_finalize_error(self, delete_mock):
temp_path = os.path.join(self._new_tempdir(), 'pre_finalize')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
init_token, [res1, res2] = self._common_init(sink)
# no-op
sink.pre_finalize(init_token, [res1, res2])
# Create finalized outputs from a previous run, which pre_finalize should
# delete.
shard1 = temp_path + '-00000-of-00002.output'
shard2 = temp_path + '-00001-of-00002.output'
with open(shard1, 'w') as f:
f.write('foo')
with open(shard2, 'w') as f:
f.write('foo')
error_str = 'mock rename error description'
delete_mock.side_effect = BeamIOError(
'mock rename error', {shard2: error_str})
with self.assertRaisesRegexp(Exception, error_str):
sink.pre_finalize(init_token, [res1, res2])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
import six
import click
from ns1cli.cli import State, write_options
from ns1cli.util import Formatter
from nsone.rest.resource import ResourceException
class RecordFormatter(Formatter):
def print_record(self, rdata):
ans = rdata.pop('answers')
fil = rdata.pop('filters')
reg = rdata.pop('regions')
meta = rdata.pop('meta')
self.pretty_print(rdata)
click.secho('ANSWERS:', bold=True)
for a in ans:
self.pretty_print(a, 4)
if len(fil):
click.secho('FILTERS:', bold=True)
for f in fil:
self.pretty_print(f, 4)
if reg:
click.secho('REGIONS:', bold=True)
for r, data in reg.items():
click.secho(' ' + r)
self.pretty_print(data, 4)
if meta:
click.secho('META:', bold=True)
self.pretty_print(meta, 4)
def zone_argument(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.ZONE = value
return value
return click.argument('ZONE', expose_value=False, callback=callback)(f)
def domain_argument(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
# if no dot in the domain name, assume we should add zone
if value.find('.') == -1:
value = '%s.%s' % (value, state.ZONE)
state.DOMAIN = value
return value
return click.argument('DOMAIN', expose_value=False, callback=callback)(f)
def type_argument(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.TYPE = value
return value
return click.argument('TYPE', expose_value=False, callback=callback)(f)
def record_arguments(f):
    # Order matters: the stacked argument decorators make commands read
    # ZONE DOMAIN TYPE on the command line.
f = type_argument(f)
f = domain_argument(f)
f = zone_argument(f)
return f
def _has_meta(resource):
return resource.get('meta', False)
@click.group('record',
short_help='view and modify records in a zone')
@click.pass_context
def cli(ctx):
"""Create, retrieve, update, and delete records in a zone."""
ctx.obj.formatter = RecordFormatter(ctx.obj.get_config('output_format'))
ctx.obj.record_api = ctx.obj.rest.records()
@cli.command('info', short_help='Get record details')
@record_arguments
@click.pass_context
def info(ctx):
"""Returns full configuration for a DNS record including basic config,
answers, regions, filter chain configuration, and all metadata tables
and data feeds attached to entities in the record.
\b
EXAMPLES:
ns1 record info test.com test A
ns1 record info test.com foo CNAME
\b
NOTES:
ZONE, DOMAIN, and record TYPE must be fully specified:
'record info example.com www.example.com A' returns
the A record for www.example.com in the example.com zone.
If no "dot" in DOMAIN, the zone is automatically appended to form a FQDN.
"""
try:
rdata = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@cli.command('create',
short_help='Create a new record, optionally with simple answers')
@click.option('--target', type=str,
help='Create a linked record from an existing target')
@click.option('--ttl', type=int,
help='TTL (defaults to default zone ttl)')
@click.option('--use-client-subnet', type=bool,
help='Set use of client-subnet edns option (defaults to true on new records)')
@write_options
@record_arguments
@click.option('--mx_priority', type=int, required=False, multiple=True,
help='MX priority (ignored if type is not MX)')
@click.argument('ANSWERS', required=False, nargs=-1)
@click.pass_context
def create(ctx, answers, mx_priority, use_client_subnet, ttl, target):
"""Creates a new dns record in the specified ZONE, for the specified DOMAIN,
of the given record TYPE.
\b
You may not create multiple records of the same TYPE for the same DOMAIN name in a ZONE.
Instead, add new ANSWERs to the existing record. The default behavior if no filters are
in the filter chain is to return all ANSWERs matching a query. The new record will take
on the same networks as the ZONE it's in.
\b
RECORD TYPES:
Currently supported record TYPEs are:
- A, AAAA, ALIAS, AFSDB, CNAME, DNAME, HINFO, MX, NAPTR, NS, PTR, RP, SPF, SRV, TXT.
\b
ANSWERS:
Multiple ANSWERs can be provided, along with RDATA fields for a DNS record of the specified TYPE.
\b
LINKED RECORDS:
To create a linked record, specify the --target as a string whose contents is
the FQDN containing the config it should link to. If link is specified, no other
record configuration (such as answers or meta) should be specified.
record create --target src_domain test.com linked_domain A
\b
EXAMPLES:
ns1 record create --ttl 200 test.com test A 1.1.1.1
ns1 record create --target source test.com linked A
ns1 record create test.com test A 1.1.1.1 2.2.2.2 3.3.3.3
ns1 record create test.com mail MX --mx_priority 10 1.1.1.1
\b
NOTES:
if record type is MX, each given answer MUST have priority:
ns1 record create test.com mail MX --mx_priority 10 1.1.1.1 --mx_priority 20 2.2.2.2
"""
ctx.obj.check_write_lock()
options = {}
if ttl:
options['ttl'] = ttl
options['use_csubnet'] = use_client_subnet
if target:
if target.find('.') == -1:
target = '%s.%s' % (target, ctx.obj.ZONE)
options['link'] = target
if ctx.obj.TYPE == 'MX':
        if len(set(mx_priority)) != len(mx_priority):
            raise click.BadOptionUsage('answers must have unique priorities')
        if len(mx_priority) != len(answers):
            raise click.BadArgumentUsage('every answer must have a priority')
        # Pair each --mx_priority with its answer; six.moves.zip works on
        # both Python 2 and 3 (six has no itertools attribute).
        answers = list(six.moves.zip(mx_priority, answers))
    elif mx_priority:
        raise click.BadOptionUsage('--mx_priority is only allowed for MX records')
options['answers'] = answers
try:
rdata = ctx.obj.record_api.create(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE, **options)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@cli.command('delete', short_help='Delete a record')
@write_options
@record_arguments
@click.pass_context
def delete(ctx):
"""Removes an existing record and all associated answers and configuration
details. NS1 will no longer respond for this record once it is deleted, and
it cannot be recovered, so use caution.
\b
Examples:
ns1 record delete test.com test A
ns1 record delete -f test.com test A
\b
NOTES:
This operation deletes all answers associated with the domain and record type.
"""
ctx.obj.check_write_lock()
try:
ctx.obj.record_api.delete(ctx.obj.ZONE, ctx.obj.DOMAIN, ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
click.echo('{} deleted'.format(ctx.obj.DOMAIN))
# META
@cli.group('meta', short_help='View and modify record meta')
@click.pass_context
def meta(ctx):
"""View and modify record meta data"""
pass
@meta.command('set', short_help='Set meta key-value pairs for a record')
@write_options
@record_arguments
@click.argument('KEY')
@click.argument('VAL')
@click.pass_context
def meta_set(ctx, val, key):
"""Set meta data key/value pairs for a record. This will set the meta data
for the entire record, which will be used for an answer if there is no
answer meta. See ns1 list meta types
\b
EXAMPLES:
ns1 record meta set test.com geo A up false
"""
ctx.obj.check_write_lock()
# there is no rest api call to set meta without setting the entire
# record, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
current['meta'][key] = val
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, meta=current['meta'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@meta.command('remove', short_help='Remove meta data key from a record')
@write_options
@record_arguments
@click.argument('KEY')
@click.pass_context
def meta_remove(ctx, key):
"""Remove meta data key/value pairs for a record. This will remove a meta
data key for the entire record.
\b
EXAMPLES:
ns1 record meta remove test.com geo A up
"""
ctx.obj.check_write_lock()
try:
# there is no rest api call to set meta without setting the entire
# answer, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
try:
del current['meta'][key]
except KeyError:
raise click.BadParameter(
'record is missing metadata key %s' % key)
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, meta=current['meta'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
# ANSWERS
@cli.group('answer', invoke_without_command=True,
short_help='View and modify a records answers')
@click.pass_context
def answer(ctx):
"""View and modify record answer data
"""
pass
@answer.command('add', short_help='Add an answer to a record')
@write_options
@record_arguments
@click.option('--mx_priority', type=int, required=False, multiple=True,
help='MX priority (ignored if type is not MX)')
@click.argument('ANSWER')
@click.pass_context
def add(ctx, mx_priority, answer):
"""Add an ANSWER to a record.
\b
EXAMPLES:
ns1 record answer add geo.test geocname.geo.test CNAME 1.1.1.1
"""
ctx.obj.check_write_lock()
answer = [answer]
if ctx.obj.TYPE == 'MX':
if not mx_priority:
raise click.BadArgumentUsage('MX answer must have a priority')
answer.append(mx_priority)
try:
record = ctx.obj.rest.loadRecord(ctx.obj.DOMAIN,
ctx.obj.TYPE,
zone=ctx.obj.ZONE)
record = record.addAnswers(answer)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(record.data)
return
ctx.obj.formatter.print_record(record.data)
# @answer.command('remove', short_help='remove an answer from a record')
# @write_options
# @record_arguments
# @click.argument('ANSWER')
# @click.pass_context
# def remove(ctx, answer):
# """remove an answer from a record
#
# Examples:
#
# record answer remove geo.test geocname.geo.test CNAME 1.1.1.1
# """
# if not ctx.obj.force:
# ctx.obj.check_write_lock()
#
# answer = [answer]
# record = ctx.obj.rest.loadRecord(ctx.obj.DOMAIN,
# ctx.obj.TYPE,
# zone=ctx.obj.ZONE)
# #@TODO: NOT WORKING
# record = record.removeAnswers(answer)
# ctx.obj.formatter.print_record(record.data)
# @TODO: Have to wait for Click v7.0 for nested command chaining
# It is currently not possible for chain commands to be nested.
# This will be fixed in future versions of Click.
# @answer.group('meta', short_help='blah',
# chain=True, invoke_without_command=True)
# @click.pass_context
# def answer_meta(ctx):
# """
#
# Examples:
#
# record answer meta set geo.test geocname.geo.test CNAME add
# """
# pass
@answer.command('meta-set', short_help='Set meta key-value pair for an answer')
@write_options
@record_arguments
@click.argument('ANSWER')
@click.argument('KEY')
@click.argument('VAL')
@click.pass_context
def answer_meta_set(ctx, val, key, answer):
"""Set meta data KEY/VALUE pairs for an ANSWER. See ns1 list meta types
\b
EXAMPLES:
ns1 record answer meta-set test.com geo A 1.2.3.4 georegion US-WEST
ns1 record answer meta-set test.com geo A 6.7.8.9 georegion US-EAST
ns1 record answer meta-set test.com geo A 3.3.3.3 georegion US-CENTRAL
"""
ctx.obj.check_write_lock()
try:
# there is no rest api call to set meta without setting the entire
# answer, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
found = False
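    # Match the given ANSWER against the first rdata field of each existing
    # answer on the record.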
for a in current['answers']:
if a['answer'][0] == answer:
if not _has_meta(a):
a['meta'] = {}
a['meta'][key] = val
found = True
break
if not found:
raise click.BadParameter(
'%s is not a current answer for this record' % answer)
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, answers=current['answers'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@answer.command('meta-remove', short_help='Remove meta key from an answer')
@write_options
@record_arguments
@click.argument('ANSWER')
@click.argument('KEY')
@click.pass_context
def answer_meta_remove(ctx, key, answer):
"""Remove a meta data KEY/VALUE pair from an ANSWER.
\b
EXAMPLES:
ns1 record answer meta-remove test.com geo A 1.2.3.4 georegion
"""
ctx.obj.check_write_lock()
try:
# there is no rest api call to set meta without setting the entire
# answer, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
for a in current['answers']:
if a['answer'][0] == answer:
if not _has_meta(a):
raise click.BadParameter('%s has no meta' % answer)
try:
del a['meta'][key]
# Remove the meta attr from answer if empty
if not a['meta']:
del a['meta']
except KeyError:
raise click.BadParameter(
'%s missing metadata key %s' % (answer, key))
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, answers=current['answers'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
# REGIONS
# @TODO: Have to wait for Click v7.0 for nested command chaining
# It is currently not possible for chain commands to be nested.
# This will be fixed in future versions of Click.
# @region.group('meta', short_help='blah',
# chain=True, invoke_without_command=True)
# @click.pass_context
# def region_meta(ctx):
# """
#
# Examples:
#
# record region meta add geo.test geocname.geo.test CNAME us-west
# """
# pass
@cli.group('region', invoke_without_command=True,
           short_help="View and modify a record's regions")
@click.pass_context
def region(ctx):
"""View and modify record region data
"""
pass
@region.command('add', short_help='Add a region to a record')
@write_options
@record_arguments
@click.argument('REGION')
@click.pass_context
def add(ctx, region):
"""Add a REGION to a record.
\b
EXAMPLES:
ns1 record region add geo.test geocname.geo.test CNAME us-west
"""
ctx.obj.check_write_lock()
try:
        # there is no rest api call to add a region without sending the entire
        # regions structure, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
for reg in current['regions'].keys():
if reg == region:
raise click.BadParameter(
'%s is already a current region for this record' % region)
current['regions'][region] = {'meta': {}}
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, regions=current['regions'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@region.command('remove', short_help='Remove a region from a record')
@write_options
@record_arguments
@click.argument('REGION')
@click.pass_context
def remove(ctx, region):
"""Remove a REGION from a record.
\b
EXAMPLES:
ns1 record region remove geo.test geocname.geo.test CNAME us-west
"""
ctx.obj.check_write_lock()
try:
        # there is no rest api call to remove a region without sending the entire
        # regions structure, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
found = False
    for reg in list(current['regions'].keys()):
if reg == region:
found = True
del current['regions'][region]
if not found:
raise click.BadParameter(
'%s is not a current region for this record' % region)
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, regions=current['regions'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@region.command('meta-set',
short_help='Set meta key-value pair for a region')
@write_options
@record_arguments
@click.argument('REGION')
@click.argument('KEY')
@click.argument('VAL')
@click.pass_context
def region_meta_set(ctx, val, key, region):
"""Set a meta data KEY/VALUE for a REGION. See ns1 list meta types
\b
EXAMPLES:
ns1 record region meta-set test.com geo A us-west up false
"""
ctx.obj.check_write_lock()
try:
        # there is no rest api call to set region meta without sending the entire
        # regions structure, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
found = False
for reg in current['regions'].keys():
if reg == region:
found = True
            if not _has_meta(current['regions'][reg]):
current['regions'][reg]['meta'] = {}
current['regions'][reg]['meta'][key] = val
if not found:
raise click.BadParameter(
'%s is not a current region for this record' % region)
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, regions=current['regions'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
@region.command('meta-remove',
short_help='Remove meta key from a region')
@write_options
@record_arguments
@click.argument('REGION')
@click.argument('KEY')
@click.pass_context
def region_meta_remove(ctx, key, region):
"""Remove a meta data KEY from a REGION.
\b
EXAMPLES:
ns1 record region meta-remove test.com geo A us-west up
"""
ctx.obj.check_write_lock()
try:
        # there is no rest api call to remove region meta without sending the entire
        # regions structure, so we have to retrieve it, alter it, and send it back
current = ctx.obj.record_api.retrieve(ctx.obj.ZONE,
ctx.obj.DOMAIN,
ctx.obj.TYPE)
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
if not current['regions'].get(region, None):
raise click.BadParameter(
'%s is not a current region for this record' % region)
if not _has_meta(current['regions'][region]):
raise click.BadParameter(
'region %s has no meta to remove' % region)
try:
del current['regions'][region]['meta'][key]
except KeyError:
raise click.BadParameter(
'region %s has no metakey %s' % (region, key))
try:
rdata = ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN,
ctx.obj.TYPE, regions=current['regions'])
except ResourceException as e:
raise click.ClickException('REST API: %s' % e.message)
else:
if ctx.obj.formatter.output_format == 'json':
ctx.obj.formatter.out_json(rdata)
return
ctx.obj.formatter.print_record(rdata)
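# The meta-set / meta-remove and region commands above all share the same
# retrieve -> alter -> update round trip, because the REST API exposes no call
# for changing answer or region metadata in place. A minimal sketch of that
# shared pattern, factored into a helper (hypothetical; the commands above do
# not call it, and it omits their ResourceException handling):
def _replace_record_field(ctx, field, value):
    """Retrieve the current record, replace one top-level field and push it back."""
    current = ctx.obj.record_api.retrieve(ctx.obj.ZONE, ctx.obj.DOMAIN, ctx.obj.TYPE)
    current[field] = value
    # e.g. field='answers' or field='regions', mirroring the update() calls above
    return ctx.obj.record_api.update(ctx.obj.ZONE, ctx.obj.DOMAIN, ctx.obj.TYPE,
                                     **{field: current[field]})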
|
|
import os
import textwrap
import warnings
import glob
import numpy as np
from astropy.table import Table, join
from astropy.io import fits
import astropy.units as u
from astropy.units import dimensionless_angles
from spectral_cube import SpectralCube
from . import catalogs
from scipy.ndimage import binary_opening
import aplpy
from config import plottingDictionary
def mask_binary(imageHDU,LowestContour,selem):
map = imageHDU[0].data
mask = binary_opening(map > LowestContour, selem)
MaskedMap = mask*map
imageHDU[0].data = MaskedMap
return imageHDU, mask
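# Hedged usage sketch for mask_binary (not called by the plotting routines below,
# which inline the same three lines): the plus-shaped structuring element keeps only
# emission islands larger than its 3x3 footprint. The default file name and contour
# level are hypothetical.
def example_mask_binary_usage(mom0_file='OrionA/OrionA_NH3_11_base_all_mom0_QA.fits',
                              lowest_contour=0.15):
    selem = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])  # 4-connected footprint
    hdu = fits.open(mom0_file)
    hdu, mask = mask_binary(hdu, lowest_contour, selem)
    return hdu, mask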
def plot_moments_QA(file_extension='base_all'):
# Get list of regions - run from images/ directory
# Assume directories correspond to regions to be imaged
# Update - use catalog?
region_list = glob.glob("*/")
for i in range(len(region_list)):
region_list[i] = region_list[i].strip("/")
line_list = ['NH3_11','NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
label_list = ['NH$_3$(1,1)','NH$_3$(2,2)','NH$_3$(3,3)','C$_2$S','HC$_5$N',
'HC$_7$N (21-20)','HC$_7$N (22-21)']
extension = file_extension
color_table='magma'
text_color='black'
text_size = 12
beam_color='#d95f02' # previously used '#E31A1C'
# Try single set of contours for first look images
w11_step = 0.3
cont_levs=2**np.arange( 0,20)*w11_step
w11_lw = 0.5
# Masking of small (noisy) regions
selem = np.array([[0,1,0],[1,1,1],[0,1,0]])
for region in region_list:
plot_param = plottingDictionary[region]
# Want to use updated, rebaselined moment maps where available:
test_rebase = '{0}/{0}_NH3_11_{1}_rebase3_mom0_QA.fits'.format(region,file_extension)
if os.path.isfile(test_rebase):
extension = '{0}_rebase3'.format(file_extension)
else:
extension = file_extension
file_w11='{0}/{0}_NH3_11_{1}_mom0_QA.fits'.format(region,extension)
if os.path.isfile(file_w11):
LowestContour= cont_levs[0]*0.5
w11_hdu = fits.open(file_w11)
map = w11_hdu[0].data
mask = binary_opening(map > LowestContour, selem)
MaskedMap = mask*map
w11_hdu[0].data = MaskedMap
for i in range(len(line_list)):
line_i=line_list[i]
label_i=label_list[i]
file_mom0='{0}/{0}_{1}_{2}_mom0_QA.fits'.format(region,line_i,extension)
if os.path.isfile(file_mom0):
line_hdu = fits.open(file_mom0)
# Use percentiles to set initial plot colourscale ranges
v_min=np.nanpercentile(line_hdu[0].data,0.1)
v_max=np.nanpercentile(line_hdu[0].data,99.9)
fig=aplpy.FITSFigure(file_mom0, hdu=0)
if line_i == 'NH3_11':
fig.show_colorscale(cmap=color_table,vmin=v_min, vmax=v_max, stretch='log',
vmid=v_min-(1.*np.abs(v_min)))
cbar_ticks = [0,3,6,12,24,48,96]
# add colorbar
fig.add_colorbar()
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0, ticks=cbar_ticks,
location='top', axis_label_text='Integrated Intensity (K km s$^{-1}$)')
elif (line_i in ['NH3_22','NH3_33']) :
fig.show_colorscale(cmap=color_table,vmin=v_min, vmax=v_max, stretch='linear',
vmid=v_min-(1.*np.abs(v_min)))
cbar_ticks = [0,1,2,3,6,12]
# add colorbar
fig.add_colorbar()
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0, ticks= cbar_ticks,
location='top', axis_label_text='Integrated Intensity (K km s$^{-1}$)')
else:
fig.show_colorscale( cmap=color_table,vmin=v_min, vmax=v_max)
# add colorbar
fig.add_colorbar()
fig.colorbar.show( box_orientation='horizontal', width=0.1, pad=0.0,
location='top', axis_label_text='Integrated Intensity (K km s$^{-1}$)')
fig.colorbar.set_font(family='sans_serif',size=text_size)
fig.colorbar.set_axis_label_font(family='sans_serif',size=text_size)
fig.set_nan_color('0.95')
#
fig.show_contour(w11_hdu, colors='gray', levels=cont_levs, linewidths=w11_lw)
# Axis labels
fig.axis_labels.set_font(family='sans_serif',size=text_size)
# Ticks
fig.ticks.set_color(text_color)
fig.tick_labels.set_font(family='sans_serif',size=text_size)
fig.tick_labels.set_style('colons')
fig.tick_labels.set_xformat('hh:mm:ss')
fig.tick_labels.set_yformat('dd:mm')
# Add beam
fig.add_beam(major=0.0088441,minor=0.0088441,angle=0)
fig.beam.set_color(beam_color)
fig.beam.set_corner('bottom left')
# Scale bar
# magic line of code to obtain scale in arcsec obtained from
# http://www.astropy.org/astropy-tutorials/Quantities.html
ang_sep = (plot_param['scalebar_size'].to(u.au)/plot_param['distance']).to(u.arcsec, equivalencies=dimensionless_angles())
fig.add_scalebar(ang_sep.to(u.degree))
fig.scalebar.set_corner(plot_param['scalebar_pos'])
fig.scalebar.set_font(family='sans_serif',size=text_size)
fig.scalebar.set(color=text_color)
fig.scalebar.set_label('{0:4.2f}'.format(plot_param['scalebar_size']))
# Labels
fig.add_label(0.025, 0.95,
'{0}\n{1}'.format(region,label_i),
relative=True, color=text_color,
horizontalalignment='left',
family='sans_serif',size=text_size)
# fig.set_system_latex(True)
fig.save( 'figures/{0}_{1}_{2}_mom0_QA_map.pdf'.format(region,line_i,extension),adjust_bbox=True,dpi=200)
fig.close()
else:
print('File {0} not found'.format(file_mom0))
else:
print('File {0} not found'.format(file_w11))
def plot_rms_QA(file_extension='base_all'):
# Get list of regions - run from images/ directory
# Assume directories correspond to regions to be imaged
# Update - use catalog?
region_list = glob.glob("*/")
for i in range(len(region_list)):
region_list[i] = region_list[i].strip("/")
line_list = ['NH3_11','NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
label_list = ['NH$_3$(1,1)','NH$_3$(2,2)','NH$_3$(3,3)','C$_2$S','HC$_5$N',
'HC$_7$N (21-20)','HC$_7$N (22-21)']
extension = file_extension
color_table='magma'
text_color='black'
text_size = 12
beam_color='#d95f02' # previously used '#E31A1C'
# Masking of small (noisy) regions
selem = np.array([[0,1,0],[1,1,1],[0,1,0]])
for region in region_list:
test_rebase = '{0}/{0}_NH3_11_{1}_rebase3_rms_QA.fits'.format(region,file_extension)
if os.path.isfile(test_rebase):
extension = '{0}_rebase3'.format(file_extension)
else:
extension = file_extension
file_r11='{0}/{0}_NH3_11_{1}_rms_QA.fits'.format(region,extension)
if os.path.isfile(file_r11):
for i in range(len(line_list)):
line_i=line_list[i]
label_i=label_list[i]
file_rms='{0}/{0}_{1}_{2}_rms_QA.fits'.format(region,line_i,extension)
if os.path.isfile(file_rms):
line_hdu = fits.open(file_rms)
# Use percentiles to set initial plot colourscale ranges
v_min=np.nanpercentile(line_hdu[0].data,0.1)
v_max=np.nanpercentile(line_hdu[0].data,99.)
fig=aplpy.FITSFigure(file_rms, hdu=0)
fig.show_colorscale( cmap=color_table,vmin=v_min, vmax=v_max)
# add colorbar
fig.add_colorbar()
#fig.colorbar.set_width(0.15)
fig.colorbar.show( box_orientation='horizontal', width=0.1, pad=0.0,
location='top', axis_label_text='(K)')
fig.colorbar.set_font(family='sans_serif',size=text_size)
fig.colorbar.set_axis_label_font(family='sans_serif',size=text_size)
fig.set_nan_color('0.95')
# Axis labels
fig.axis_labels.set_font(family='sans_serif',size=text_size)
# Ticks
fig.ticks.set_color(text_color)
fig.tick_labels.set_font(family='sans_serif',size=text_size)
fig.tick_labels.set_style('colons')
fig.tick_labels.set_xformat('hh:mm:ss')
fig.tick_labels.set_yformat('dd:mm')
# Add beam
fig.add_beam(major=0.0088441,minor=0.0088441,angle=0)
fig.beam.set_color(beam_color)
fig.beam.set_corner('bottom left')
'''
# Scale bar
# magic line of code to obtain scale in arcsec obtained from
# http://www.astropy.org/astropy-tutorials/Quantities.html
                    ang_sep = (plot_param['scalebar_size'].to(u.au)/plot_param['distance']).to(u.arcsec, equivalencies=dimensionless_angles())
fig.add_scalebar(ang_sep.to(u.degree))
fig.scalebar.set_corner(plot_param['scalebar_pos'])
fig.scalebar.set_font(family='sans_serif',size=text_size)
fig.scalebar.set(color=text_color)
fig.scalebar.set_label('{0:4.2f}'.format(plot_param['scalebar_size']))
'''
# Labels
fig.add_label(0.025, 0.1,
'{0}\n{1}'.format(region,label_i),
relative=True, color=text_color,
horizontalalignment='left',
family='sans_serif',size=text_size)
# fig.set_system_latex(True)
fig.save( 'figures/{0}_{1}_{2}_rms_QA_map.pdf'.format(region,line_i,extension),adjust_bbox=True)
fig.close()
else:
print('File {0} not found'.format(file_rms))
else:
print('File {0} not found'.format(file_r11))
def plot_property_maps(file_extension='base_all'):
# Get list of regions - run from images/ directory
# Assume directories correspond to regions to be imaged
# Update - use catalog?
region_list = glob.glob("*/")
for i in range(len(region_list)):
region_list[i] = region_list[i].strip("/")
ext_list = [4,3,0,1,2]
label_list = ['$v_{LSR}$ (km s$^{-1}$)','$\sigma_v$ (km s$^{-1}$)','$T_K$ (K)','$T_{ex}$ (K)','log N(para-NH$_3$)']
file_list = ['vlsr','sigv','Tk','Tex','N_NH3']
ctable_list = ['RdYlBu_r','Blues_r','plasma','hot','plasma'] #'YlGnBu_r'
text_color='black'
text_size = 12
beam_color='#d95f02' # previously used '#E31A1C'
# Try single set of contours for first look images
w11_step = 0.4
cont_levs=2**np.arange( 0,20)*w11_step
w11_lw = 0.5
# Masking of small (noisy) regions
selem = np.array([[0,1,0],[1,1,1],[0,1,0]])
for region in region_list:
        print(region)
# Want to use updated, rebaselined moment maps where available:
test_rebase = '{0}/{0}_NH3_11_{1}_rebase3_mom0_QA.fits'.format(region,file_extension)
if os.path.isfile(test_rebase):
extension = '{0}_rebase3'.format(file_extension)
else:
extension = file_extension
file_w11='{0}/{0}_NH3_11_{1}_mom0_QA.fits'.format(region,extension)
if not os.path.isfile(file_w11):
file_w11 = '{0}/{0}_NH3_11_{1}_mom0.fits'.format(region,extension)
data_file = '{0}/{0}_parameter_maps_{1}.fits'.format(region,extension)
if os.path.isfile(data_file):
hdu = fits.open(data_file)
data = hdu[0].data
header = hdu[0].header
hdu.close()
rm_key=['NAXIS3','CRPIX3','CDELT3', 'CUNIT3', 'CTYPE3', 'CRVAL3']
# Set header to 2D image for plotting
for key_i in rm_key:
header.remove(key_i)
header['NAXIS'] = 2
header['WCSAXES'] = 2
# Get NH3 (1,1) moment contours
LowestContour= cont_levs[0]*0.5
w11_hdu = fits.open(file_w11)
map = w11_hdu[0].data
mask = binary_opening(map > LowestContour, selem)
MaskedMap = mask*map
w11_hdu[0].data = MaskedMap
for i in range(len(ext_list)):
propmap = data[ext_list[i]]
maskedProp = propmap * mask
maskedProp[maskedProp == 0] = np.nan
prop_hdu = fits.PrimaryHDU(maskedProp,header)
label = label_list[i]
# Use percentiles to set initial plot colourscale ranges without masking
# Will likely mask later
# Need different percentiles for vlsr vs. Tk
if ext_list[i] in [2,3,4]:
v_min=np.nanpercentile(maskedProp,2.5)
v_max=np.nanpercentile(maskedProp,97.5)
else:
if region == 'OrionA':
v_min=5.
v_max = 40.
else:
v_min = np.nanpercentile(maskedProp,5)
v_max = np.nanpercentile(maskedProp,95)
fig=aplpy.FITSFigure(prop_hdu,)
fig.show_colorscale(cmap=ctable_list[i],vmin=v_min, vmax=v_max)
# add colorbar
fig.add_colorbar()
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0,# ticks=cbar_ticks,
location='top')#, axis_label_text='Integrated Intensity (K km s$^{-1}$)')
fig.colorbar.set_font(family='sans_serif',size=text_size)
fig.colorbar.set_axis_label_font(family='sans_serif',size=text_size)
fig.set_nan_color('0.99')
#
fig.show_contour(w11_hdu, colors='gray', levels=cont_levs, linewidths=w11_lw)
# Axis labels
fig.axis_labels.set_font(family='sans_serif',size=text_size)
# Ticks
fig.ticks.set_color(text_color)
fig.tick_labels.set_font(family='sans_serif',size=text_size)
fig.tick_labels.set_style('colons')
fig.tick_labels.set_xformat('hh:mm:ss')
fig.tick_labels.set_yformat('dd:mm')
# Add beam
fig.add_beam(major=0.0088441,minor=0.0088441,angle=0)
fig.beam.set_color(beam_color)
fig.beam.set_corner('bottom left')
'''
# Scale bar
# magic line of code to obtain scale in arcsec obtained from
# http://www.astropy.org/astropy-tutorials/Quantities.html
                #ang_sep = (plot_param['scalebar_size'].to(u.au)/plot_param['distance']).to(u.arcsec, equivalencies=dimensionless_angles())
fig.add_scalebar(ang_sep.to(u.degree))
fig.scalebar.set_corner(plot_param['scalebar_pos'])
fig.scalebar.set_font(family='sans_serif',size=text_size)
fig.scalebar.set(color=text_color)
fig.scalebar.set_label('{0:4.2f}'.format(plot_param['scalebar_size']))
'''
# Labels
fig.add_label(0.025, 0.95,
'{0}\n{1}'.format(region,label),
relative=True, color=text_color,
horizontalalignment='left',
family='sans_serif',size=text_size)
# fig.set_system_latex(True)
fig.save( 'figures/{0}_{1}_{2}.pdf'.format(region,extension,file_list[i]),adjust_bbox=True,dpi=200)
fig.close()
else:
print('File {0} not found'.format(data_file))
|
|
# -*- coding: utf-8 -*-
'''This module provides a registry for skos providers.
This registry helps us find providers during runtime. We can also apply some
operations to all or several providers at the same time.
'''
import logging
log = logging.getLogger(__name__)
from .uri import is_uri
class RegistryException(Exception):
pass
class Registry:
'''
This registry collects all skos providers.
'''
providers = {}
'''
Dictionary containing all providers, keyed by id.
'''
concept_scheme_uri_map = {}
'''
Dictionary mapping concept scheme uri's to vocabulary id's.
'''
metadata = {}
'''
Dictionary containing metadata about this registry.
'''
instance_scope = 'single'
'''
Indicates how the registry is being used. Options:
- single: The registry is part of a script or a single process. It can
be assumed to be operational for the entire duration of the process
and there are no threads involved.
- threaded_global: The registry is part of a program that uses threads,
such as a typical web application. It's attached to the global process
and duplicated to threads, making it not thread safe. Proceed carefully
with certain providers. Should generally only be used with
applications that only use read-only providers that load all data in
memory at startup and use no database connections or other kinds of
sessions.
- threaded_thread: The registry is part of a program that uses threads,
such as a typical web application. It's attached to a thread, such as
a web request. The registry is instantiated for this thread/request and
dies with this thread/request. This is needed for providers such
as the SQLAlchemyProvider. Providers that use database connections or
other session handling code generally require this.
'''
def __init__(self, instance_scope='single', metadata={}):
'''
:param str instance_scope: Indicates how the registry was instantiated.
Possible values: single, threaded_global, threaded_thread.
:param dict metadata: Metadata essential to this registry. Possible
metadata:
* `catalog`: A :class:`dict` detailing the catalog all \
conceptschemes are part of. \
Currently the contents of the dictionary are undefined \
except for a :term:`uri` attribute that must be present.
* `dataset`: A :class:`dict` detailing the dataset all \
conceptschemes are part of. \
Currently the contents of the dictionary are undefined \
except for a :term:`uri` attribute that must be present.
'''
self.providers = {}
self.concept_scheme_uri_map = {}
self.metadata = metadata
if instance_scope not in ['single', 'threaded_global', 'threaded_thread']:
raise ValueError('Invalid instance_scope.')
self.instance_scope = instance_scope
def get_metadata(self):
'''Get some metadata on the registry it represents.
:rtype: Dict.
'''
return self.metadata
def register_provider(self, provider):
'''
Register a :class:`skosprovider.providers.VocabularyProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
to register.
:raises RegistryException: A provider with this id or uri has already
been registered.
'''
if (
provider.allowed_instance_scopes
and self.instance_scope not in provider.allowed_instance_scopes
):
raise RegistryException(
'This provider does not support instance_scope %s' % self.instance_scope
)
if provider.get_vocabulary_id() in self.providers:
raise RegistryException(
'A provider with this id has already been registered.'
)
self.providers[provider.get_vocabulary_id()] = provider
try:
cs_uri = provider.get_vocabulary_uri()
except AttributeError as e:
log.error(e)
# For providers not compatible with skosprovider >= 0.8.0
log.warning(
'New versions of skosprovider (>=0.8.0) require your provider '
'to have a get_vocabulary_uri method. This fallback mechanism '
'will be removed in version 2.0.0.'
)
cs_uri = provider.concept_scheme.uri
if cs_uri in self.concept_scheme_uri_map:
raise RegistryException(
'A provider with URI %s has already been registered.' % cs_uri
)
self.concept_scheme_uri_map[cs_uri] = provider.get_vocabulary_id()
def remove_provider(self, id):
'''
Remove the provider with the given id or :term:`URI`.
:param str id: The identifier for the provider.
:returns: A :class:`skosprovider.providers.VocabularyProvider` or
`False` if the id is unknown.
'''
if id in self.providers:
p = self.providers.get(id, False)
del self.providers[id]
try:
cs_uri = p.get_vocabulary_uri()
except AttributeError as e:
log.error(e)
# For providers not compatible with skosprovider >= 0.8.0
log.warning(
'New versions of skosprovider (>=0.8.0) require your provider '
'to have a get_vocabulary_uri method. This fallback mechanism '
'will be removed in version 2.0.0.'
)
# For providers not compatible with skosprovider >= 0.8.0
cs_uri = p.concept_scheme.uri
del self.concept_scheme_uri_map[cs_uri]
return p
elif id in self.concept_scheme_uri_map:
id = self.concept_scheme_uri_map[id]
return self.remove_provider(id)
else:
return False
def get_provider(self, id):
'''
Get a provider by id or :term:`URI`.
:param str id: The identifier for the provider. This can either be the
id with which it was registered or the :term:`URI` of the conceptscheme
that the provider services.
:returns: A :class:`skosprovider.providers.VocabularyProvider`
or `False` if the id or uri is unknown.
'''
if id in self.providers:
return self.providers.get(id, False)
elif is_uri(id) and id in self.concept_scheme_uri_map:
return self.providers.get(self.concept_scheme_uri_map[id], False)
return False
def get_providers(self, **kwargs):
'''Get all providers registered.
If keyword `ids` is present, get only the providers with these ids.
        If keyword `subject` is present, get only the providers that have this subject.
.. code-block:: python
# Get all providers with subject 'biology'
registry.get_providers(subject='biology')
# Get all providers with id 1 or 2
registry.get_providers(ids=[1,2])
# Get all providers with id 1 or 2 and subject 'biology'
            registry.get_providers(ids=[1,2], subject='biology')
:param list ids: Only return providers with one of the Ids or :term:`URIs <URI>`.
:param str subject: Only return providers with this subject.
:returns: A list of :class:`providers <skosprovider.providers.VocabularyProvider>`
'''
if 'ids' in kwargs:
ids = [self.concept_scheme_uri_map.get(id, id) for id in kwargs['ids']]
providers = [
self.providers[k] for k in self.providers.keys() if k in ids
]
else:
providers = list(self.providers.values())
if 'subject' in kwargs:
providers = [p for p in providers if kwargs['subject'] in p.metadata['subject']]
return providers
def find(self, query, **kwargs):
'''Launch a query across all or a selection of providers.
.. code-block:: python
# Find anything that has a label of church in any provider.
registry.find({'label': 'church'})
# Find anything that has a label of church with the BUILDINGS provider.
# Attention, this syntax was deprecated in version 0.3.0
registry.find({'label': 'church'}, providers=['BUILDINGS'])
# Find anything that has a label of church with the BUILDINGS provider.
registry.find({'label': 'church'}, providers={'ids': ['BUILDINGS']})
# Find anything that has a label of church with a provider
# marked with the subject 'architecture'.
registry.find({'label': 'church'}, providers={'subject': 'architecture'})
# Find anything that has a label of church in any provider.
# If possible, display the results with a Dutch label.
registry.find({'label': 'church'}, language='nl')
# Find anything that has a match with an external concept
# If possible, display the results with a Dutch label.
registry.find({
'matches': {
'uri': 'http://id.python.org/different/types/of/trees/nr/1/the/larch'
}}, language='nl')
# Find anything that has a label of lariks with a close match to an external concept
# If possible, display the results with a Dutch label.
provider.find({
'matches': {
'label': 'lariks',
'type': 'close',
'uri': 'http://id.python.org/different/types/of/trees/nr/1/the/larch'
}}, language='nl')
:param dict query: The query parameters that will be passed on to each
:meth:`~skosprovider.providers.VocabularyProvider.find` method of
            the selected
:class:`providers <skosprovider.providers.VocabularyProvider>`.
:param dict providers: Optional. If present, it should be a dictionary.
This dictionary can contain any of the keyword arguments available
to the :meth:`get_providers` method. The query will then only
            be passed to the providers conforming to these arguments.
:param string language: Optional. If present, it should be a
:term:`language-tag`. This language-tag is passed on to the
underlying providers and used when selecting the label to display
for each concept.
:returns: a list of :class:`dict`.
Each dict has two keys: id and concepts.
'''
if 'providers' not in kwargs:
providers = self.get_providers()
else:
pargs = kwargs['providers']
if isinstance(pargs, list):
providers = self.get_providers(ids=pargs)
else:
providers = self.get_providers(**pargs)
kwarguments = {}
if 'language' in kwargs:
kwarguments['language'] = kwargs['language']
return [{'id': p.get_vocabulary_id(), 'concepts': p.find(query, **kwarguments)}
for p in providers]
def get_all(self, **kwargs):
'''Get all concepts from all providers.
.. code-block:: python
# get all concepts in all providers.
registry.get_all()
# get all concepts in all providers.
# If possible, display the results with a Dutch label.
registry.get_all(language='nl')
:param string language: Optional. If present, it should be a
:term:`language-tag`. This language-tag is passed on to the
underlying providers and used when selecting the label to display
for each concept.
:returns: a list of :class:`dict`.
Each dict has two keys: id and concepts.
'''
kwarguments = {}
if 'language' in kwargs:
kwarguments['language'] = kwargs['language']
return [{'id': p.get_vocabulary_id(), 'concepts': p.get_all(**kwarguments)}
for p in self.providers.values()]
def get_by_uri(self, uri):
'''Get a concept or collection by its uri.
Returns a single concept or collection if one exists with this uri.
Returns False otherwise.
:param string uri: The uri to find a concept or collection for.
:raises ValueError: The uri is invalid.
:rtype: :class:`skosprovider.skos.Concept` or
:class:`skosprovider.skos.Collection`
'''
if not is_uri(uri):
raise ValueError('%s is not a valid URI.' % uri)
# Check if there's a provider that's more likely to have the URI
csuris = [csuri for csuri in self.concept_scheme_uri_map.keys() if uri.startswith(csuri)]
for csuri in csuris:
c = self.get_provider(csuri).get_by_uri(uri)
if c:
return c
# Check all providers
for p in self.providers.values():
c = p.get_by_uri(uri)
if c:
return c
return False
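# Hedged usage sketch (illustrative only, not part of the module). It assumes the
# DictionaryProvider shipped with skosprovider.providers; the ids, URIs and labels
# below are made up.
def _example_registry_usage():
    from skosprovider.providers import DictionaryProvider
    trees = DictionaryProvider(
        {'id': 'TREES', 'uri': 'http://id.trees.org'},
        [{'id': '1',
          'uri': 'http://id.trees.org/1',
          'type': 'concept',
          'labels': [{'type': 'prefLabel', 'language': 'en', 'label': 'The Larch'}]}]
    )
    registry = Registry(instance_scope='single')
    registry.register_provider(trees)
    # A provider can be looked up by its id or by its concept scheme URI.
    assert registry.get_provider('TREES') is trees
    assert registry.get_provider('http://id.trees.org') is trees
    # Queries fan out over (a selection of) providers.
    results = registry.find({'label': 'Larch'})
    # A concept can also be resolved registry-wide by its URI.
    concept = registry.get_by_uri('http://id.trees.org/1')
    return results, concept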
|
|
"""
Execute raw graph to ATerm after inference but before evaluation.
"""
import string
import numpy as np
from collections import namedtuple
from blaze.datashape.coretypes import DataShape
from blaze.byteproto import CONTIGUOUS, READ
from blaze.expr.paterm import AAppl, ATerm, AAnnotation, AString, AInt, AFloat
from blaze.expr.visitor import MroVisitor
#------------------------------------------------------------------------
# Plans
#------------------------------------------------------------------------
# Annotate with simple_type() which will pull the simple type of
# the App, Op, or Literal node before we even hit eval(). This is
# a stop gap measure because right we're protoyping this without
# having the types annotated on the graph that we're using for
# Numba code generation. It's still probably a good idea to have
# this knowledge available if we have it though!
def annotate_dshape(ds):
"""
Convert a datashape instance into Aterm annotation
>>> ds = dshape('2, 2, int32')
    >>> anno = annotate_dshape(ds)
    >>> anno
    dshape("2, 2, int32")
>>> type(anno)
<class 'AAppl'>
"""
assert isinstance(ds, DataShape)
return AAppl(ATerm('dshape'), [AString(str(ds))])
def annotation(graph, *metadata):
# Metadata holds a reference to the graph node, not really
# what we want but fine for now...
metadata = (id(graph),) + metadata
# was originally .datashape but this is a reserved attribute
# so moved to a new simple_type() method that wraps around
# promote()
anno = annotate_dshape(graph.simple_type())
annotation = AAnnotation(anno, metadata)
return annotation
#------------------------------------------------------------------------
# ATerm -> Instructions
#------------------------------------------------------------------------
class Constant(object):
def __init__(self, n):
self.n = n
def __repr__(self):
return 'const(%s)' % self.n
class Var(object):
def __init__(self, key):
self.key = key
def __repr__(self):
return self.key
class Instruction(object):
def __init__(self, fn, args=None, lhs=None):
""" %lhs = fn{props}(arguments) """
self.fn = fn
self.lhs = lhs
self.args = args or []
def __repr__(self):
# with output types
if self.lhs:
return self.lhs + ' = ' + \
' '.join([self.fn,] + map(repr, self.args))
# purely side effectful
else:
return ' '.join([self.fn,] + map(repr, self.args))
# TODO: naive constant folding
class InstructionGen(MroVisitor):
""" Map ATerm into linear instructions, unlike ATerm this
does not preserve the information contained in the expression
graph, information is discarded.
Maintains a stack as the nodes are visited, the instructions
for the innermost term are top on the stack. The temporaries
are mapped through the vartable.
::
a + b * c
::
instructions = [
%3 = <ufunc 'multiply'> %1 %2,
%4 = <ufunc 'add'> %0 %3
]
vartable = {
Array(){dshape("2, 2, int32"),54490464} : '%0',
Array(){dshape("2, 2, float32"),54490176} : '%1',
Array(){dshape("2, 2, int32"),54491184} : '%2',
...
}
"""
# TODO: markf comments: this gives us an all-or-nothing approach
# either "all-numba" or "all-something else". FIX this
def __init__(self, have_numbapro):
self.numbapro = have_numbapro
self.n = 0
self._vartable = {}
self._instructions = []
def result(self):
return self._instructions
@property
def vars(self):
return self._vartable
def var(self, term):
key = ('%' + str(self.n))
self._vartable[term] = key
self.n += 1
return key
def AAppl(self, term):
label = term.spine.label
if label == 'Arithmetic':
return self._Arithmetic(term)
elif label == 'Array':
return self._Array(term)
elif label == 'Slice':
return self._Slice(term)
elif label == 'Assign':
return self._Assign(term)
else:
raise NotImplementedError
def _Arithmetic(self, term):
# All the function signatures are of the form
#
# Add(a,b)
#
# But the aterm expression for LLVM is expected to be
#
# Arithmetic(Add, ...)
#
# so we do this ugly hack to get the signature back to
# standard form
# -- hack --
op = term.args[0]
args = term.args[1:]
normal_term = AAppl(ATerm(op), args)
# --
assert isinstance(op, ATerm)
label = op.label
if self.numbapro:
pass
# ==================================================
# TODO: right here is where we would call the
# ExecutionPipeline and build a numba ufunc kernel
# if we have numbapro. We would pass in the original
# ``term`` object which is still of the expected form:
#
# Arithmetic(Add, ...)
# ==================================================
        # otherwise, go find us an implementation for how to execute.
        # Returns either an ExternalF (a reference to an external C
        # library) or a PythonF (a Python callable). These can be
        # anything: numpy ufuncs, numexpr, pandas, cmath, whatever.
from blaze.rts.funcs import lookup
# visit the innermost arguments, push those arguments on
# the instruction list first
self.visit(args)
fn, cost = lookup(normal_term)
fargs = [self._vartable[a] for a in args]
# push the temporary for the result in the vartable
key = self.var(term)
# build the instruction & push it on the stack
inst = Instruction(str(fn.fn), fargs, lhs=key)
self._instructions.append(inst)
def _Array(self, term):
key = self.var(term)
return Var(key)
def _Assign(self, term):
pass
def _Slice(self, term):
pass
def AInt(self, term):
self._vartable[term] = Constant(term.n)
return
def AFloat(self, term):
self._vartable[term] = Constant(term.n)
return
def ATerm(self, term):
return
#------------------------------------------------------------------------
# Graph -> ATerm
#------------------------------------------------------------------------
class BlazeVisitor(MroVisitor):
""" Map Blaze graph objects into ATerm """
def __init__(self):
self.operands = []
def App(self, graph):
return self.visit(graph.operator)
def Fun(self, graph):
return self.visit(graph.children)
def Op(self, graph):
opname = graph.__class__.__name__
if graph.is_arithmetic:
return AAppl(ATerm('Arithmetic'),
[ATerm(opname)] + self.visit(graph.children),
annotation=annotation(graph))
else:
return AAppl(ATerm(opname), self.visit(graph.children),
annotation=annotation(graph))
def Literal(self, graph):
if graph.vtype == int:
return AInt(graph.val, annotation=annotation(graph))
if graph.vtype == float:
return AFloat(graph.val, annotation=annotation(graph))
else:
return ATerm(graph.val, annotation=annotation(graph))
def Indexable(self, graph):
self.operands.append(graph)
return AAppl(ATerm('Array'), [], annotation=annotation(graph))
def Slice(self, graph):
# Slice(start, stop, step){id(graph), 'get'|'set'}
array, start, stop, step = graph.operands
if start:
start = self.visit(start)
if stop:
stop = self.visit(stop)
if step:
step = self.visit(step)
return AAppl(
ATerm('Slice'),
[self.visit(array),
start or ATerm('None'),
stop or ATerm('None'),
step or ATerm('None')],
annotation=annotation(graph, graph.op)
)
def IndexNode(self, graph):
return AAppl(ATerm('Index'), self.visit(graph.operands),
annotation=annotation(graph, graph.op))
def Assign(self, graph):
return AAppl(ATerm('Assign'), self.visit(graph.operands),
annotation=annotation(graph))
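# Hedged sketch of how the two visitors are meant to compose (a hypothetical driver,
# not part of this module). `graph` is assumed to be an inferred Blaze expression graph.
def _example_lowering(graph, have_numbapro=False):
    visitor = BlazeVisitor()
    aterm = visitor.visit(graph)           # Graph -> ATerm (annotated with dshapes)
    igen = InstructionGen(have_numbapro)   # ATerm -> linear instructions
    igen.visit(aterm)
    return igen.result(), igen.vars        # instruction list and the %N vartable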
|
|
from test.test_support import verify, TestFailed, check_syntax, vereq
import warnings
warnings.filterwarnings("ignore", r"import \*", SyntaxWarning, "<string>")
print "1. simple nesting"
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
vereq(inc(1), 2)
vereq(plus10(-2), 8)
print "2. extra nesting"
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
vereq(inc(1), 2)
vereq(plus10(-2), 8)
print "3. simple nesting + rebinding"
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
vereq(inc(1), 2)
vereq(plus10(-2), 8)
print "4. nesting with global but no free"
def make_adder4(): # XXX add extra level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
vereq(adder(1), 2)
global_x = 10
vereq(adder(-2), 8)
print "5. nesting through class"
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
vereq(inc(1), 2)
vereq(plus10(-2), 8)
print "6. nesting plus free ref to global"
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
vereq(inc(1), 11) # there's only one global
vereq(plus10(-2), 8)
print "7. nearest enclosing scope"
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
vereq(test_func(5), 47)
print "8. mixed freevars and cellvars"
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
vereq(h(), 39)
print "9. free variable in method"
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
vereq(t.test(), "var")
vereq(t.method_and_var(), "method")
vereq(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
vereq(t.test(), "var")
vereq(t.method_and_var(), "method")
vereq(t.actual_global(), "global")
print "10. recursion"
def f(x):
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
raise ValueError, "x must be >= 0"
vereq(f(6), 720)
print "11. unoptimized namespaces"
check_syntax("""\
def unoptimized_clash1(strip):
def f(s):
from string import *
return strip(s) # ambiguity: free or local
return f
""")
check_syntax("""\
def unoptimized_clash2():
from string import *
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
check_syntax("""\
def unoptimized_clash2():
from string import *
def g():
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
# XXX could allow this for exec with const argument, but what's the point
check_syntax("""\
def error(y):
exec "a = 1"
def f(x):
return x + y
return f
""")
check_syntax("""\
def f(x):
def g():
return x
del x # can't del name
""")
check_syntax("""\
def f():
def g():
from string import *
return strip # global or local?
""")
# and verify a few cases that should work
exec """
def noproblem1():
from string import *
f = lambda x:x
def noproblem2():
from string import *
def f(x):
return x + 1
def noproblem3():
from string import *
def f(x):
global y
y = x
"""
print "12. lambdas"
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
vereq(inc(1), 2)
vereq(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
vereq(inc(1), 2)
vereq(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
vereq(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
vereq(h(), 18)
print "13. UnboundLocal"
def errorInOuter():
print y
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
try:
errorInOuter()
except UnboundLocalError:
pass
else:
raise TestFailed
try:
errorInInner()
except NameError:
pass
else:
raise TestFailed
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
global_x = 1
def f():
global_x += 1
try:
f()
except UnboundLocalError:
pass
else:
raise TestFailed, 'scope of global_x not correctly determined'
print "14. complex definitions"
def makeReturner(*lst):
def returner():
return lst
return returner
vereq(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
return kwargs
return returner
vereq(makeReturner2(a=11)()['a'], 11)
def makeAddPair((a, b)):
def addPair((c, d)):
return (a + c, b + d)
return addPair
vereq(makeAddPair((1, 2))((100, 200)), (101,202))
print "15. scope of global statements"
# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
# I
x = 7
def f():
x = 1
def g():
global x
def i():
def h():
return x
return h()
return i()
return g()
vereq(f(), 7)
vereq(x, 7)
# II
x = 7
def f():
x = 1
def g():
x = 2
def i():
def h():
return x
return h()
return i()
return g()
vereq(f(), 2)
vereq(x, 7)
# III
x = 7
def f():
x = 1
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
vereq(f(), 2)
vereq(x, 2)
# IV
x = 7
def f():
x = 3
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
vereq(f(), 2)
vereq(x, 2)
# XXX what about global statements in class blocks?
# do they affect methods?
x = 12
class Global:
global x
x = 13
def set(self, val):
x = val
def get(self):
return x
g = Global()
vereq(g.get(), 13)
g.set(15)
vereq(g.get(), 13)
print "16. check leaks"
class Foo:
count = 0
def __init__(self):
Foo.count += 1
def __del__(self):
Foo.count -= 1
def f1():
x = Foo()
def f2():
return x
f2()
for i in range(100):
f1()
vereq(Foo.count, 0)
print "17. class and global"
def test(x):
class Foo:
global x
def __call__(self, y):
return x + y
return Foo()
x = 0
vereq(test(6)(2), 8)
x = -1
vereq(test(3)(2), 5)
looked_up_by_load_name = False
class X:
    # Implicit globals inside classes are looked up by LOAD_NAME, not
# LOAD_GLOBAL.
locals()['looked_up_by_load_name'] = True
passed = looked_up_by_load_name
verify(X.passed)
print "18. verify that locals() works"
def f(x):
def g(y):
def h(z):
return y + z
w = x + y
y += 3
return locals()
return g
d = f(2)(4)
verify(d.has_key('h'))
del d['h']
vereq(d, {'x': 2, 'y': 7, 'w': 6})
print "19. var is bound and free in class"
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
vereq(inst.a, inst.m())
print "20. interaction with trace function"
import sys
def tracer(a,b,c):
return tracer
def adaptgetter(name, klass, getter):
kind, des = getter
if kind == 1: # AV happens when stepping from this line to next
if des == "":
des = "_%s__%s" % (klass.__name__, name)
return lambda obj: getattr(obj, des)
class TestClass:
pass
sys.settrace(tracer)
adaptgetter("foo", TestClass, (1, ""))
sys.settrace(None)
try: sys.settrace()
except TypeError: pass
else: raise TestFailed, 'sys.settrace() did not raise TypeError'
print "20. eval and exec with free variables"
def f(x):
return lambda: x + 1
g = f(3)
try:
eval(g.func_code)
except TypeError:
pass
else:
print "eval() should have failed, because code contained free vars"
try:
exec g.func_code
except TypeError:
pass
else:
print "exec should have failed, because code contained free vars"
print "21. list comprehension with local variables"
try:
print bad
except NameError:
pass
else:
print "bad should not be defined"
def x():
[bad for s in 'a b' for bad in s.split()]
x()
try:
print bad
except NameError:
pass
print "22. eval with free variables"
def f(x):
def g():
x
eval("x + 1")
return g
f(4)()
|
|
from Firefly import logging
from Firefly.const import (EVENT_ACTION_ANY, EVENT_ACTON_TYPE, TIME)
from typing import List
class Subscriptions(object):
"""Subscriptions.
Subscriptions should be stored like so:
{ DEVICE_SUBSCRIBED_TO: {EVENT_ACTION: [LIST_OF_SUBSCRIBERS], ...} }
"""
# TODO: Add functionality to export subscriptions to json file.
def __init__(self):
self.subscriptions = {}
# TODO: Add functionality to import subscriptions from json file.
def get_subscribers(self, subscribe_to_id: str, event_action: dict = {EVENT_ACTION_ANY:EVENT_ACTION_ANY}) -> List[str]:
"""Gets a list of subscribers.
    Returns a list of subscriber IDs that are subscribed to the ff_id passed in for the given event types, plus any
    subscriber that is listening for EVENT_ACTION_ANY.
    Args:
      subscribe_to_id (str): the ff_id being listened to
      event_action (dict): event types to listen for
Returns:
list: List of subscriber IDs
"""
_event_action = event_action.copy()
_event_action = verify_event_action(_event_action, get_subscribers=True)
try:
subscriptions = self.subscriptions[subscribe_to_id]
except KeyError:
logging.warn('Component (%s) not found in subscriptions' % subscribe_to_id)
return []
# Get subscribers listening to Anything
try:
subscribers_any = set(subscriptions[EVENT_ACTION_ANY][EVENT_ACTION_ANY])
except:
subscribers_any = set()
subscribers_prop_any = set()
subscribers = set()
for ea in _event_action:
if type(ea) is not dict:
logging.error(code='FF.SUB.GET.001', args=(str(ea))) # event action is not type dict: %s=
continue
for prop, act in ea.items():
# Get any subscribers listening for any of that prop
try:
subscribers_prop_any.update(subscriptions[prop][EVENT_ACTION_ANY])
except:
pass
# Get subscribers for the prop and action
for a in act:
try:
subscribers.update(subscriptions[prop][a])
except:
pass
subscribers.update(subscribers_any, subscribers_prop_any)
return list(subscribers)
def add_subscriber(self, subscriber_id: str, subscribe_to_id: str,
event_action: EVENT_ACTON_TYPE = EVENT_ACTION_ANY) -> None:
"""Add a subscriber.
This adds a new subscriber.
Args:
subscriber_id (str): subscriber id.
subscribe_to_id (str): id of ff_id listening to.
event_action (dict): The event types to listen to.
"""
event_action = verify_event_action(event_action)
if subscribe_to_id not in self.subscriptions:
logging.info('%s not in subscriptions. Adding new subscription.' % subscribe_to_id)
self.subscriptions[subscribe_to_id] = {}
subscriptions = self.subscriptions[subscribe_to_id]
for ea in event_action:
# Deal with EVENT_ACTION_ANY.
if type(ea) is str:
if ea == EVENT_ACTION_ANY:
if EVENT_ACTION_ANY not in subscriptions:
subscriptions[EVENT_ACTION_ANY] = {}
if event_action[ea] not in subscriptions[EVENT_ACTION_ANY]:
subscriptions[EVENT_ACTION_ANY][event_action[ea]] = []
if subscriber_id not in subscriptions[EVENT_ACTION_ANY][event_action[ea]]:
subscriptions[EVENT_ACTION_ANY][event_action[ea]].append(subscriber_id)
# Deal with a dict of event actions.
if type(ea) is dict:
for evt, act in ea.items():
if evt not in subscriptions:
subscriptions[evt] = {}
if type(act) is list:
for a in act:
if a not in subscriptions[evt].keys():
subscriptions[evt][a] = []
if subscriber_id not in subscriptions[evt][a]:
subscriptions[evt][a].append(subscriber_id)
else:
if act not in subscriptions[evt].keys():
subscriptions[evt][act] = []
if subscriber_id not in subscriptions[evt][act]:
subscriptions[evt][act].append(subscriber_id)
def get_all_subscribers(self, subscribe_to_id: str) -> list:
"""Get a list of all subscribers to a component.
Args:
subscribe_to_id (str): The component id.
Returns: A list of subscriber IDs
"""
try:
subscriptions = self.subscriptions[subscribe_to_id]
except KeyError:
logging.warn('Component (%s) not found in subscriptions' % subscribe_to_id)
return []
subscribers = set()
for act, itm in subscriptions.items():
if type(itm) is list:
subscribers.update(itm)
if type(itm) is dict:
for itm_prop, itm_act in itm.items():
if type(itm_act) is list:
subscribers.update(itm_act)
else:
logging.error(code='FF.SUB.GET.002') # unknown error
return list(subscribers)
def get_subscriber_items(self, subscriber_id: str, subscribe_to_id: str) -> dict:
"""Get a dict of subscriber items.
Args:
subscriber_id (str): The subscriber ID to be deleted
      subscribe_to_id (str): The ID that the subscriber is listening to
Returns: A dict of subscriber items.
"""
subscription_items = {}
try:
subscriptions = self.subscriptions[subscribe_to_id]
except KeyError:
logging.warn('Component (%s) not found in subscriptions' % subscribe_to_id)
return {}
for act, itm in subscriptions.items():
if type(itm) is list:
if subscriber_id in itm:
subscription_items[act] = True
if type(itm) is dict:
for itm_act, itm_subs in itm.items():
if type(itm_subs) is not list:
logging.info('[get_subscriber_items] subscriber list is not list.')
continue
if subscriber_id in itm_subs:
if subscription_items.get(act) is None:
subscription_items[act] = {}
subscription_items[act][itm_act] = True
return subscription_items
def delete_subscriber(self, subscriber_id: str, subscribe_to_id: str,
event_action: EVENT_ACTON_TYPE = EVENT_ACTION_ANY,
delete_all: bool = False) -> int:
"""Deletes or Replace a subscriber.
Args:
subscriber_id (str): The subscriber ID to be deleted
      subscribe_to_id (str): The ID that the subscriber is listening to
event_action (list): The event types to be deleted
delete_all (bool): Delete from all event types if True
Returns:
(int): The number of subscriptions changed
"""
return self.delete_replace_subscriber(subscriber_id, subscribe_to_id, event_action, delete_all)
def delete_all_subscriptions(self, subscriber_id: str) -> int:
"""Delete all subscriptions from all devices from subscriber.
Args:
subscriber_id (str): The subscriber ID to be deleted
Returns:
(int): The number of subscriptions deleted
"""
total_deletions = 0
for sub in self.subscriptions.keys():
total_deletions += self.delete_subscriber(subscriber_id, sub, delete_all=True)
return total_deletions
def delete_replace_subscriber(self, subscriber_id: str, subscribe_to_id: str,
event_action: EVENT_ACTON_TYPE = EVENT_ACTION_ANY,
change_all: bool = False, new_subscriber_id: str = None) -> int:
"""Deletes or Replace a subscriber.
Args:
subscriber_id (str): The subscriber ID to be deleted
      subscribe_to_id (str): The ID that the subscriber is listening to
event_action (list): The event types to be deleted
change_all (bool): Delete from all event types if True
new_subscriber_id (str): New subscriber ID
Returns:
(int): The number of subscriptions changed
"""
changed_subscriptions = 0
event_action = verify_event_action(event_action)
try:
subscriptions = self.subscriptions[subscribe_to_id]
except KeyError:
logging.warn('Component (%s) not found in subscriptions' % subscribe_to_id)
return changed_subscriptions
subscription_items = self.get_subscriber_items(subscriber_id, subscribe_to_id)
if change_all:
for prop, act in subscription_items.items():
if type(act) is dict:
for act_itm in act.keys():
try:
subscriptions[prop][act_itm].remove(subscriber_id)
if new_subscriber_id is not None:
subscriptions[prop][act_itm].append(new_subscriber_id)
changed_subscriptions += 1
except (KeyError, ValueError):
logging.error(code='FF.SUB.DEL.007') # subscriber not found in subscriptions [it should have been]
except Exception as e:
logging.error(code='FF.SUB.DEL.001', args=(e)) # unknown error: %s
return changed_subscriptions
else:
for action in event_action:
if action == EVENT_ACTION_ANY and EVENT_ACTION_ANY in subscription_items:
if EVENT_ACTION_ANY in subscription_items[EVENT_ACTION_ANY]:
try:
              subscriptions[EVENT_ACTION_ANY][EVENT_ACTION_ANY].remove(subscriber_id)
if new_subscriber_id is not None:
                subscriptions[EVENT_ACTION_ANY][EVENT_ACTION_ANY].append(new_subscriber_id)
changed_subscriptions += 1
except (KeyError, ValueError):
logging.error(code='FF.SUB.DEL.003') # subscriber not found in subscriptions [it should have been]
except Exception as e:
logging.error(code='FF.SUB.DEL.004', args=(e)) # unknown error: %s
if type(action) is dict:
for act_itm, act_prop in action.items():
if act_itm in subscription_items:
if type(act_prop) is not list:
logging.error(code='FF.SUB.DEL.002') # subscribers are not type list
for p in act_prop:
try:
subscriptions[act_itm][p].remove(subscriber_id)
if new_subscriber_id is not None:
subscriptions[act_itm][p].append(new_subscriber_id)
changed_subscriptions += 1
except (KeyError, ValueError):
logging.error(code='FF.SUB.DEL.006') # subscriber not found in subscriptions [it should have been]
except Exception as e:
logging.error(code='FF.SUB.DEL.005', args=(e)) # unknown error: %s
return changed_subscriptions
def change_subscriber_parent_id(self, old_id: str, new_id: str) -> bool:
"""
Changes the parent subscriber ID.
    The parent is the one being listened to.
[ PARENT] [CHILD]
{ DEVICE_SUBSCRIBED_TO: {EVENT_ACTION: [LIST_OF_SUBSCRIBERS], ...} }
Args:
old_id (str): Old parent ID
new_id (str): New Parent ID
Returns:
(bool) Action successful
"""
if old_id not in self.subscriptions:
return False
if new_id in self.subscriptions:
return False
self.subscriptions[new_id] = self.subscriptions[old_id]
del self.subscriptions[old_id]
return True
def change_subscriber_id(self, subscriber_id: str, new_subscriber_id: str) -> int:
"""
Changes the child subscriber ID.
    The parent is the one being listened to.
[ PARENT] [CHILD]
{ DEVICE_SUBSCRIBED_TO: {EVENT_ACTION: [LIST_OF_SUBSCRIBERS], ...} }
Args:
      subscriber_id (str): Old subscriber ID
      new_subscriber_id (str): New subscriber ID
Returns:
(int) Number of changes
"""
change_count = 0
for subscription in self.subscriptions:
change_count += self.delete_replace_subscriber(subscriber_id, subscription, change_all=True,
new_subscriber_id=new_subscriber_id)
return change_count
def verify_event_action(event_action: EVENT_ACTON_TYPE = EVENT_ACTION_ANY, get_subscribers: bool = False) -> list:
"""Takes and event action and returns a list of event actions.
Args:
event_action (EVENT_ACTON_TYPE): event_action passed in
Returns:
list: list of event_action dict
"""
if type(event_action) is dict:
return [verify_event_action_dict(event_action)]
if event_action == EVENT_ACTION_ANY:
return [{EVENT_ACTION_ANY:[EVENT_ACTION_ANY]}]
if type(event_action) is list:
new_event_actions = {}
for ea in event_action:
# Verify when not coming from get_subscribers
if not get_subscribers:
if type(ea) is not dict and ea != EVENT_ACTION_ANY:
logging.error(code='FF.SUB.VER.001',
args=(event_action)) # event_action: %s is not type dict. making any listener
if EVENT_ACTION_ANY in new_event_actions.keys():
new_event_actions[EVENT_ACTION_ANY].append(ea)
else:
new_event_actions[EVENT_ACTION_ANY] = [ea]
elif type(ea) is dict:
new_event_actions.update(verify_event_action_dict(ea))
elif ea == EVENT_ACTION_ANY:
if EVENT_ACTION_ANY in new_event_actions.keys():
new_event_actions[EVENT_ACTION_ANY].append(EVENT_ACTION_ANY)
else:
new_event_actions[EVENT_ACTION_ANY] = [EVENT_ACTION_ANY]
# Verify when coming from get_subscribers
if get_subscribers:
if type(ea) is not list:
new_event_actions.update(ea)
if type(ea) is list:
new_event_actions.update(ea)
return [new_event_actions]
logging.error(code='FF.SUB.VER.002') # event action not verified
return event_action
def verify_event_action_dict(event_action: dict) -> dict:
for evt, act in event_action.items():
if type(act) is not list:
event_action[evt] = [act]
return event_action
def verify_event_action_time(event_action: EVENT_ACTON_TYPE) -> list:
if type(event_action) is dict:
event_action = [event_action]
for ea in event_action:
# Check for missing items
if 'hour' not in ea.keys():
logging.error(code='FF.SUB.VER.003') # hour not in time action
ea = {}
continue
if 'minute' not in ea.keys():
logging.error(code='FF.SUB.VER.004') # minute not in time action
ea = {}
continue
if 'weekdays' not in ea.keys():
logging.error(code='FF.SUB.VER.005') # weekdays not in time action. setting to everyday.
ea['weekdays'] = [1,2,3,4,5,6,7]
# Verify time ranges
if ea['hour'] < 0 or ea['hour'] > 24:
logging.error(code='FF.SUB.VER.006') # hour is out of range
ea ={}
continue
if ea['minute'] < 0 or ea['minute'] > 60:
logging.error(code='FF.SUB.VER.007') # minute is out of range
ea ={}
continue
if max(ea['weekdays']) > 7 or min(ea['weekdays']) < 1:
logging.error(code='FF.SUB.VER.008') # weekdays is out of range
ea = {}
continue
return event_action
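# Hedged usage sketch (illustrative only, not called anywhere): building the nested
# structure described in the Subscriptions docstring and reading it back. The
# component ids below are made up.
def _example_subscription_usage():
    subs = Subscriptions()
    # 'alarm_1' only cares about 'lux' events with action 'high' from 'light_sensor_1'.
    subs.add_subscriber('alarm_1', 'light_sensor_1', {'lux': ['high']})
    # 'logger_1' listens to everything from the same component.
    subs.add_subscriber('logger_1', 'light_sensor_1', EVENT_ACTION_ANY)
    # Internally this becomes:
    #   {'light_sensor_1': {'lux': {'high': ['alarm_1']},
    #                       EVENT_ACTION_ANY: {EVENT_ACTION_ANY: ['logger_1']}}}
    # and both subscribers come back for a matching event:
    return subs.get_subscribers('light_sensor_1', {'lux': ['high']})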
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 14:23:25 2017
@author: gtucker
"""
import numpy as np
from numpy.testing import assert_equal
from landlab import RasterModelGrid
from landlab.components import ErosionDeposition, FlowAccumulator
def test_erodep_slope_area_small_vs():
"""Test steady state run with Vs << 1."""
# Set up a 5x5 grid with open boundaries and low initial elevations.
rg = RasterModelGrid((5, 5))
z = rg.add_zeros("topographic__elevation", at="node")
z[:] = 0.01 * rg.x_of_node
# Create a D8 flow handler
fa = FlowAccumulator(rg, flow_director="FlowDirectorD8")
# Parameter values for test 1
K = 0.001
vs = 0.0001
U = 0.001
dt = 10.0
# Create the ErosionDeposition component...
ed = ErosionDeposition(rg, K=K, v_s=vs, m_sp=0.5, n_sp=1.0, solver="adaptive")
# ... and run it to steady state.
for i in range(1000):
fa.run_one_step()
ed.run_one_step(dt=dt)
z[rg.core_nodes] += U * dt
# Test the results
s = rg.at_node["topographic__steepest_slope"]
sa_factor = (1.0 + vs) * U / K
a11 = 2.0
a12 = 1.0
s = rg.at_node["topographic__steepest_slope"]
s11 = sa_factor * (a11**-0.5)
s12 = sa_factor * (a12**-0.5)
assert_equal(np.round(s[11], 3), np.round(s11, 3))
assert_equal(np.round(s[12], 3), np.round(s12, 3))
def test_erodep_slope_area_big_vs():
"""Test steady state run with Vs >> 1."""
# Set up a 5x5 grid with open boundaries and low initial elevations.
rg = RasterModelGrid((5, 5))
z = rg.add_zeros("topographic__elevation", at="node")
z[:] = 0.01 * rg.x_of_node
# Create a D8 flow handler
fa = FlowAccumulator(rg, flow_director="FlowDirectorD8")
# Next test: big Vs
K = 1.0
vs = 1000.0
U = 0.001
dt = 10.0
# Create the ErosionDeposition component...
ed = ErosionDeposition(rg, K=K, v_s=vs, m_sp=0.5, n_sp=1.0, solver="adaptive")
# ... and run it to steady state.
for i in range(1000):
fa.run_one_step()
ed.run_one_step(dt=dt)
z[rg.core_nodes] += U * dt
# Test the results
s = rg.at_node["topographic__steepest_slope"]
sa_factor = (1.0 + vs) * U / K
a11 = 2.0
a12 = 1.0
s11 = sa_factor * (a11**-0.5)
s12 = sa_factor * (a12**-0.5)
assert_equal(np.round(s[11], 2), np.round(s11, 2))
assert_equal(np.round(s[12], 2), np.round(s12, 2))
def test_erodep_slope_area_with_vs_unity():
"""Test steady state run with Vs = 1."""
# Set up a 5x5 grid with open boundaries and low initial elevations.
rg = RasterModelGrid((5, 5))
z = rg.add_zeros("topographic__elevation", at="node")
z[:] = 0.01 * rg.x_of_node
# Create a D8 flow handler
fa = FlowAccumulator(rg, flow_director="FlowDirectorD8")
# test: Vs = 1
K = 0.002
vs = 1.0
U = 0.001
dt = 10.0
# Create the ErosionDeposition component...
ed = ErosionDeposition(rg, K=K, v_s=vs, m_sp=0.5, n_sp=1.0, solver="adaptive")
# ... and run it to steady state.
for i in range(1000):
fa.run_one_step()
ed.run_one_step(dt=dt)
z[rg.core_nodes] += U * dt
# Test the results
s = rg.at_node["topographic__steepest_slope"]
sa_factor = (1.0 + vs) * U / K
a11 = 2.0
a12 = 1.0
s11 = sa_factor * (a11**-0.5)
s12 = sa_factor * (a12**-0.5)
assert_equal(np.round(s[11], 2), np.round(s11, 2))
assert_equal(np.round(s[12], 2), np.round(s12, 2))
def test_erodep_slope_area_shear_stress_scaling():
"""Test steady state run with m_sp = 0.33, n_sp=0.67, Vs = 1."""
# Set up a 5x5 grid with open boundaries and low initial elevations.
rg = RasterModelGrid((5, 5))
rg.set_closed_boundaries_at_grid_edges(True, True, True, False)
z = rg.add_zeros("topographic__elevation", at="node")
z[:] = 0.01 * rg.x_of_node
# Create a D8 flow handler
fa = FlowAccumulator(rg, flow_director="FlowDirectorD8")
# test: Vs = 1
K = 0.002
vs = 1.0
U = 0.001
dt = 10.0
m_sp = 0.33
n_sp = 0.67
# Create the ErosionDeposition component...
ed = ErosionDeposition(rg, K=K, v_s=vs, m_sp=m_sp, n_sp=n_sp, solver="adaptive")
# ... and run it to steady state.
for i in range(1500):
fa.run_one_step()
ed.run_one_step(dt=dt)
z[rg.core_nodes] += U * dt
# Test the results
s = rg.at_node["topographic__steepest_slope"]
sa_factor = ((1.0 + vs) * U / K) ** (1.0 / n_sp)
a6 = rg.at_node["drainage_area"][6]
a8 = rg.at_node["drainage_area"][8]
s6 = sa_factor * (a6 ** -(m_sp / n_sp))
s8 = sa_factor * (a8 ** -(m_sp / n_sp))
assert_equal(np.round(s[6], 2), np.round(s6, 2))
assert_equal(np.round(s[8], 2), np.round(s8, 2))
def test_erodep_slope_area_with_threshold():
"""Test steady state run with Vs = 1 and wc = 0.00001."""
# Set up a 5x5 grid with open boundaries and low initial elevations.
rg = RasterModelGrid((5, 5))
z = rg.add_zeros("topographic__elevation", at="node")
z[:] = 0.01 * rg.x_of_node
# Create a D8 flow handler
fa = FlowAccumulator(rg, flow_director="FlowDirectorD8")
# test: Vs = 1
K = 0.002
vs = 1.0
U = 0.001
dt = 10.0
wc = 0.0001
# Create the ErosionDeposition component...
ed = ErosionDeposition(
rg, K=K, v_s=vs, m_sp=0.5, n_sp=1.0, sp_crit=wc, solver="adaptive"
)
# ... and run it to steady state.
for i in range(1000):
fa.run_one_step()
ed.run_one_step(dt=dt)
z[rg.core_nodes] += U * dt
# Test the results
s = rg.at_node["topographic__steepest_slope"]
sa_factor = ((1.0 + vs) * U + wc) / K # approximate sol'n
a11 = 2.0
a12 = 1.0
s11 = sa_factor * (a11**-0.5)
s12 = sa_factor * (a12**-0.5)
assert_equal(np.round(s[11], 2), np.round(s11, 2))
assert_equal(np.round(s[12], 2), np.round(s12, 2))
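# Most of the assertions above compare numerical slopes against the same
# analytical steady-state slope-area relation. The helper below is an
# illustrative sketch only (its name is hypothetical and it is not part of the
# landlab test suite); it spells that relation out for the non-threshold cases:
#     S = ((1 + V_s) * U / K) ** (1 / n_sp) * A ** (-m_sp / n_sp)
def predicted_steady_slope(area, K, v_s, U, m_sp=0.5, n_sp=1.0):
    """Analytical steady-state slope for the erosion-deposition model (no threshold)."""
    sa_factor = ((1.0 + v_s) * U / K) ** (1.0 / n_sp)
    return sa_factor * area ** -(m_sp / n_sp)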
|
|
import hashlib
import logging
from datetime import datetime
from django.db.backends.utils import strip_quotes
from django.db.models import Index
from django.db.transaction import TransactionManagementError, atomic
from django.utils import timezone
from django.utils.encoding import force_bytes
logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filter out m2m objects from reverse relations.
# Return (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor:
"""
This class and its subclasses are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False, atomic=True):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
self.atomic_migration = self.connection.features.can_rollback_ddl and atomic
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.atomic_migration:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.atomic_migration:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=()):
"""Execute the given SQL statement, with optional parameters."""
# Don't perform the transactional DDL check if SQL is being collected
# as it's not going to be executed anyway.
if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl:
raise TransactionManagementError(
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
# Log the command we're running, then run it
logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql})
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generate a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Take a field and return its column definition.
The field must already have had set_attributes_from_name() called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""Return a field's effective database default value."""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = bytes()
else:
default = str()
elif getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False):
default = datetime.now()
internal_type = field.get_internal_type()
if internal_type == 'DateField':
default = default.date
elif internal_type == 'TimeField':
default = default.time
elif internal_type == 'DateTimeField':
default = timezone.now
else:
default = None
# If it's a callable, call it
if callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Return a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Create a table and any accompanying indexes or unique constraints for
the given `model`.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() in ("AutoField", "BigAutoField"):
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
# Prevent using [] as params, in the case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""Delete a model from the database."""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def add_index(self, model, index):
"""Add an index on a model."""
self.execute(index.create_sql(model, self))
def remove_index(self, model, index):
"""Remove an index from a model."""
self.execute(index.remove_sql(model, self))
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deal with a model changing its unique_together. The input
unique_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deal with a model changing its index_together. The input
index_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""Rename the table a model points to."""
if (old_db_table == new_db_table or
(self.connection.features.ignores_table_name_case and
old_db_table.lower() == new_db_table.lower())):
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""Move a model's table between tablespaces."""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and self.effective_default(field) is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
"type": db_params['type'],
}
}
self.execute(sql)
# Add an index, if required
self.deferred_sql.extend(self._field_indexes_sql(model, field))
# Add any FK constraints later
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allow a field's type, uniqueness, nullability, default, column,
constraints, etc. to be modified.
`old_field` is required to compute the necessary changes.
If `strict` is True, raise errors if the old column does not match
`old_field` precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
            # '_meta.related_objects' also contains M2M reverse fields, these
# will be filtered out
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
# Remove indexes if db_index switched to False or a unique constraint
# will now be used in lieu of an index. The following lines from the
# truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# True | False | False | False
# True | False | False | True
# True | False | True | True
if old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique):
# Find the index for this field
meta_index_names = {index.name for index in model._meta.indexes}
# Retrieve only BTREE indexes since this is what's created with
# db_index=True.
index_names = self._constraint_names(model, [old_field.column], index=True, type_=Index.suffix)
for index_name in index_names:
if index_name in meta_index_names:
# The only way to check if an index was created with
# db_index=True or with Index(['field'], name='foo')
# is to look at its name (refs #28053).
continue
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_field.null and
not new_field.null and
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"type": new_type,
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"type": new_type,
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index? Add an index if db_index switched to True or a unique
# constraint will no longer be used in lieu of an index. The following
# lines from the truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# False | False | True | False
# False | True | True | False
# True | True | True | False
if (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique:
self.execute(self._create_index_sql(model, [new_field]))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
"type": new_type,
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Return a two-tuple of: an SQL fragment of (sql, params) to insert into
an ALTER TABLE statement and a list of extra (sql, params) tuples to
run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generate a unique name for an index/unique constraint.
The name is divided into 3 parts: the table name, the column names,
and a unique digest and suffix.
"""
table_name = strip_quotes(model._meta.db_table)
hash_data = [table_name] + list(column_names)
hash_suffix_part = '%s%s' % (self._digest(*hash_data), suffix)
max_length = self.connection.ops.max_name_length() or 200
# If everything fits into max_length, use that name.
index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part)
if len(index_name) <= max_length:
return index_name
# Shorten a long suffix.
if len(hash_suffix_part) > max_length / 3:
hash_suffix_part = hash_suffix_part[:max_length // 3]
other_length = (max_length - len(hash_suffix_part)) // 2 - 1
index_name = '%s_%s_%s' % (
table_name[:other_length],
'_'.join(column_names)[:other_length],
hash_suffix_part,
)
# Prepend D if needed to prevent the name from starting with an
# underscore or a number (not permitted on Oracle).
if index_name[0] == "_" or index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _get_index_tablespace_sql(self, model, fields):
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
return tablespace_sql
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
tablespace_sql = self._get_index_tablespace_sql(model, fields)
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"using": "",
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return a list of all index SQL statements (field indexes,
index_together, Meta.indexes) for the specified model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
output.extend(self._field_indexes_sql(model, field))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
for index in model._meta.indexes:
output.append(index.create_sql(model, self))
return output
def _field_indexes_sql(self, model, field):
"""
Return a list of all index SQL statements for the specified field.
"""
output = []
if self._field_should_be_indexed(model, field):
output.append(self._create_index_sql(model, [field]))
return output
def _field_should_be_indexed(self, model, field):
return field.db_index and not field.unique
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
"deferrable": self.connection.ops.deferrable_sql(),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None, type_=None):
"""Return all constraint names matching the columns and conditions."""
if column_names is not None:
column_names = [
self.connection.introspection.column_name_converter(name)
for name in column_names
]
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
if type_ is not None and infodict['type'] != type_:
continue
result.append(name)
return result
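# Illustrative sketch, not part of Django: the hypothetical helper below mirrors
# the digest/truncation logic of _digest() and _create_index_name() with plain
# hashlib (it omits the trailing "prepend D" fixup needed for Oracle), so the
# naming behaviour can be inspected without a database connection.
def _sketch_index_name(table_name, column_names, suffix="", max_length=30):
    # md5 over the concatenated parts, truncated to 8 hex chars, as in _digest()
    digest = hashlib.md5("".join([table_name] + list(column_names)).encode()).hexdigest()[:8]
    hash_suffix_part = '%s%s' % (digest, suffix)
    index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part)
    if len(index_name) <= max_length:
        return index_name
    # Shorten a long suffix, then split the remaining budget between table and columns
    if len(hash_suffix_part) > max_length / 3:
        hash_suffix_part = hash_suffix_part[:max_length // 3]
    other_length = (max_length - len(hash_suffix_part)) // 2 - 1
    return '%s_%s_%s' % (
        table_name[:other_length],
        '_'.join(column_names)[:other_length],
        hash_suffix_part,
    )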
|
|
"""
a decorated KazooClient with handy operations on a ZK datatree and its znodes
"""
from contextlib import contextmanager
import os
import re
import socket
import sre_constants
import time
from kazoo.client import KazooClient, TransactionRequest
from kazoo.exceptions import NoAuthError, NoNodeError
from kazoo.protocol.states import KazooState
from .statmap import StatMap
from .tree import Tree
from .usage import Usage
from .util import get_ips, hosts_to_endpoints, join, to_bytes
@contextmanager
def connected_socket(address, timeout=3):
""" yields a connected socket """
sock = socket.create_connection(address, timeout)
yield sock
sock.close()
class ClientInfo(object):
__slots__ = "id", "ip", "port", "client_hostname", "server_ip", "server_port", "server_hostname"
def __init__(self, sid=None, ip=None, port=None, server_ip=None, server_port=None):
setattr(self, "id", sid)
setattr(self, "ip", ip)
setattr(self, "port", port)
setattr(self, "server_ip", server_ip)
setattr(self, "server_port", server_port)
setattr(self, "client_hostname", None)
setattr(self, "server_hostname", None)
def __call__(self, ip, port, server_ip, server_port):
setattr(self, "ip", ip)
setattr(self, "port", port)
setattr(self, "server_ip", server_ip)
setattr(self, "server_port", server_port)
def __str__(self):
return "%s %s" % (self.id, self.endpoints)
@property
def endpoints(self):
return "%s:%s %s:%s" % (self.ip, self.port, self.server_ip, self.server_port)
@property
def resolved(self):
self._resolve_hostnames()
return "%s %s" % (self.id, self.resolved_endpoints)
@property
def resolved_endpoints(self):
self._resolve_hostnames()
return "%s:%s %s:%s" % (
self.client_hostname, self.port, self.server_hostname, self.server_port)
def _resolve_hostnames(self):
if self.client_hostname is None and self.ip:
self.resolve_ip("client_hostname", self.ip)
if self.server_hostname is None and self.server_ip:
self.resolve_ip("server_hostname", self.server_ip)
def resolve_ip(self, attr, ip):
try:
hname = socket.gethostbyaddr(ip)[0]
setattr(self, attr, hname)
except socket.herror:
pass
class XTransactionRequest(TransactionRequest):
""" wrapper to make PY3K (slightly) painless """
def create(self, path, value=b"", acl=None, ephemeral=False,
sequence=False):
""" wrapper that handles encoding (yay Py3k) """
super(XTransactionRequest, self).create(path, to_bytes(value), acl, ephemeral, sequence)
def set_data(self, path, value, version=-1):
""" wrapper that handles encoding (yay Py3k) """
super(XTransactionRequest, self).set_data(path, to_bytes(value), version)
class XClient(KazooClient):
""" adds some extra methods to KazooClient """
class CmdFailed(Exception):
""" 4 letter cmd failed """
pass
SESSION_REGEX = re.compile(r"^(0x\w+):")
IP_PORT_REGEX = re.compile(r"^\tip:\s/(\d+\.\d+\.\d+\.\d+):(\d+)\ssessionId:\s(0x\w+)\Z")
PATH_REGEX = re.compile(r"^\t((?:/.*)+)\Z")
@property
def xid(self):
""" the session's current xid or -1 if not connected """
conn = self._connection
return conn._xid if conn else -1
@property
def session_timeout(self):
""" the negotiated session timeout """
return self._session_timeout
@property
def server(self):
""" the (hostaddr, port) of the connected ZK server (or "") """
conn = self._connection
return conn._socket.getpeername() if conn else ""
@property
def client(self):
""" the (hostaddr, port) of the local endpoint (or "") """
conn = self._connection
return conn._socket.getsockname() if conn else ""
@property
def sessionid(self):
return "0x%x" % (getattr(self, "_session_id", 0))
@property
def protocol_version(self):
""" this depends on https://github.com/python-zk/kazoo/pull/182,
so play conservatively
"""
return getattr(self, "_protocol_version", 0)
@property
def data_watches(self):
""" paths for data watches """
return self._data_watchers.keys()
@property
def child_watches(self):
""" paths for child watches """
return self._child_watchers.keys()
def get(self, *args, **kwargs):
""" wraps the default get() and deals with encoding """
value, stat = super(XClient, self).get(*args, **kwargs)
try:
if value is not None:
value = value.decode(encoding="utf-8")
except UnicodeDecodeError:
pass
return (value, stat)
def get_bytes(self, *args, **kwargs):
""" no string decoding performed """
return super(XClient, self).get(*args, **kwargs)
def set(self, path, value, version=-1):
""" wraps the default set() and handles encoding (Py3k) """
value = to_bytes(value)
super(XClient, self).set(path, value, version)
def create(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False):
""" wraps the default create() and handles encoding (Py3k) """
value = to_bytes(value)
return super(XClient, self).create(path, value, acl, ephemeral, sequence, makepath)
def create_async(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False):
""" wraps the default create() and handles encoding (Py3k) """
value = to_bytes(value)
return super(XClient, self).create_async(path, value, acl, ephemeral, sequence, makepath)
def transaction(self):
""" use XTransactionRequest which is encoding aware (Py3k) """
return XTransactionRequest(self)
def du(self, path):
""" returns the bytes used under path """
return Usage(self, path).value
def get_acls_recursive(self, path, depth, include_ephemerals):
"""A recursive generator wrapper for get_acls
:param path: path from which to start
:param depth: depth of the recursion (-1 no recursion, 0 means no limit)
:param include_ephemerals: get ACLs for ephemerals too
"""
yield path, self.get_acls(path)[0]
if depth == -1:
return
for tpath, _ in self.tree(path, depth, full_path=True):
try:
acls, stat = self.get_acls(tpath)
except NoNodeError:
continue
if not include_ephemerals and stat.ephemeralOwner != 0:
continue
yield tpath, acls
def find(self, path, match, flags):
""" find every matchin child path under path """
try:
match = re.compile(match, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
for cpath in Tree(self, path).get():
if match.search(cpath):
yield cpath
def grep(self, path, content, flags):
""" grep every child path under path for content """
try:
match = re.compile(content, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
for gpath, matches in self.do_grep(path, match):
yield (gpath, matches)
def do_grep(self, path, match):
""" grep's work horse """
try:
children = self.get_children(path)
except (NoNodeError, NoAuthError):
children = []
for child in children:
full_path = os.path.join(path, child)
try:
value, _ = self.get(full_path)
except (NoNodeError, NoAuthError):
value = ""
if value is not None:
matches = [line for line in value.split("\n") if match.search(line)]
if len(matches) > 0:
yield (full_path, matches)
for mpath, matches in self.do_grep(full_path, match):
yield (mpath, matches)
def child_count(self, path):
"""
returns the child count under path (deals with znodes going away as it's
traversing the tree).
"""
stat = self.stat(path)
if not stat:
return 0
count = stat.numChildren
for _, _, stat in self.tree(path, 0, include_stat=True):
if stat:
count += stat.numChildren
return count
def tree(self, path, max_depth, full_path=False, include_stat=False):
"""DFS generator which starts from a given path and goes up to a max depth.
:param path: path from which the DFS will start
:param max_depth: max depth of DFS (0 means no limit)
:param full_path: should the full path of the child node be returned
:param include_stat: return the child Znode's stat along with the name & level
"""
for child_level_stat in self.do_tree(path, max_depth, 0, full_path, include_stat):
yield child_level_stat
def do_tree(self, path, max_depth, level, full_path, include_stat):
""" tree's work horse """
try:
children = self.get_children(path)
except (NoNodeError, NoAuthError):
children = []
for child in children:
cpath = join(path, child) if full_path else child
if include_stat:
yield cpath, level, self.stat(join(path, child))
else:
yield cpath, level
if max_depth == 0 or level + 1 < max_depth:
cpath = join(path, child)
for rchild_rlevel_rstat in self.do_tree(cpath, max_depth, level + 1, full_path, include_stat):
yield rchild_rlevel_rstat
def fast_tree(self, path, exclude_recurse=None):
""" a fast async version of tree() """
for cpath in Tree(self, path).get(exclude_recurse):
yield cpath
def stat_map(self, path):
""" a generator for <child, Stat> """
return StatMap(self, path).get()
def diff(self, path_a, path_b):
""" Performs a deep comparison of path_a/ and path_b/
For each child, it yields (rv, child) where rv:
-1 if doesn't exist in path_b (destination)
0 if they are different
1 if it doesn't exist in path_a (source)
"""
path_a = path_a.rstrip("/")
path_b = path_b.rstrip("/")
if not self.exists(path_a) or not self.exists(path_b):
return
if not self.equal(path_a, path_b):
yield 0, "/"
seen = set()
len_a = len(path_a)
len_b = len(path_b)
# first, check what's missing & changed in dst
for child_a, level in self.tree(path_a, 0, True):
child_sub = child_a[len_a + 1:]
child_b = join(path_b, child_sub)
if not self.exists(child_b):
yield -1, child_sub
else:
if not self.equal(child_a, child_b):
yield 0, child_sub
seen.add(child_sub)
# now, check what's new in dst
for child_b, level in self.tree(path_b, 0, True):
child_sub = child_b[len_b + 1:]
if child_sub not in seen:
yield 1, child_sub
def equal(self, path_a, path_b):
"""
compare if a and b have the same bytes
"""
content_a, _ = self.get_bytes(path_a)
content_b, _ = self.get_bytes(path_b)
return content_a == content_b
def stat(self, path):
""" safely gets the Znode's Stat """
try:
stat = self.exists(str(path))
except (NoNodeError, NoAuthError):
stat = None
return stat
def _to_endpoints(self, hosts):
return [self.current_endpoint] if hosts is None else hosts_to_endpoints(hosts)
def mntr(self, hosts=None):
""" send an mntr cmd to either host or the connected server """
return self.cmd(self._to_endpoints(hosts), "mntr")
def cons(self, hosts=None):
""" send a cons cmd to either host or the connected server """
return self.cmd(self._to_endpoints(hosts), "cons")
def dump(self, hosts=None):
""" send a dump cmd to either host or the connected server """
return self.cmd(self._to_endpoints(hosts), "dump")
def cmd(self, endpoints, cmd):
"""endpoints is [(host1, port1), (host2, port), ...]"""
replies = []
for ep in endpoints:
try:
replies.append(self._cmd(ep, cmd))
except self.CmdFailed as ex:
# if there's only 1 endpoint, give up.
# if there's more, keep trying.
if len(endpoints) == 1:
raise ex
return "".join(replies)
def _cmd(self, endpoint, cmd):
""" endpoint is (host, port) """
cmdbuf = "%s\n" % (cmd)
# some cmds have large outputs and ZK closes the connection as soon as it
# finishes writing. so read in huge chunks.
recvsize = 1 << 20
replies = []
host, port = endpoint
ips = get_ips(host, port)
if len(ips) == 0:
raise self.CmdFailed("Failed to resolve: %s" % (host))
for ip in ips:
try:
with connected_socket((ip, port)) as sock:
sock.send(cmdbuf.encode())
while True:
buf = sock.recv(recvsize).decode("utf-8")
if buf == "":
break
replies.append(buf)
except socket.error as ex:
# if there's only 1 record, give up.
# if there's more, keep trying.
if len(ips) == 1:
raise self.CmdFailed("Error(%s): %s" % (ip, ex))
return "".join(replies)
@property
def current_endpoint(self):
if not self.connected:
raise self.CmdFailed("Not connected and no host given.")
# If we are using IPv6, getpeername() returns a 4-tuple
return self._connection._socket.getpeername()[:2]
def zk_url(self):
""" returns `zk://host:port` for the connected host:port """
return "zk://%s:%d" % self.current_endpoint
def reconnect(self):
""" forces a reconnect by shutting down the connected socket
return True if the reconnect happened, False otherwise
"""
state_change_event = self.handler.event_object()
def listener(state):
if state is KazooState.SUSPENDED:
state_change_event.set()
self.add_listener(listener)
self._connection._socket.shutdown(socket.SHUT_RDWR)
state_change_event.wait(1)
if not state_change_event.is_set():
return False
# wait until we are back
while not self.connected:
time.sleep(0.1)
return True
def dump_by_server(self, hosts):
"""Returns the output of dump for each server.
:param hosts: comma separated lists of members of the ZK ensemble.
        :returns: A dictionary of ((server_ip, port), dump output string).
"""
dump_by_endpoint = {}
for endpoint in self._to_endpoints(hosts):
try:
out = self.cmd([endpoint], "dump")
except self.CmdFailed as ex:
out = ""
dump_by_endpoint[endpoint] = out
return dump_by_endpoint
def ephemerals_info(self, hosts):
"""Returns ClientInfo per path.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (path, ClientInfo).
"""
info_by_path, info_by_id = {}, {}
for server_endpoint, dump in self.dump_by_server(hosts).items():
server_ip, server_port = server_endpoint
sid = None
for line in dump.split("\n"):
mat = self.SESSION_REGEX.match(line)
if mat:
sid = mat.group(1)
continue
mat = self.PATH_REGEX.match(line)
if mat:
info = info_by_id.get(sid, None)
if info is None:
info = info_by_id[sid] = ClientInfo(sid)
info_by_path[mat.group(1)] = info
continue
mat = self.IP_PORT_REGEX.match(line)
if mat:
ip, port, sid = mat.groups()
if sid not in info_by_id:
continue
info_by_id[sid](ip, int(port), server_ip, server_port)
return info_by_path
def sessions_info(self, hosts):
"""Returns ClientInfo per session.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (session_id, ClientInfo).
"""
info_by_id = {}
for server_endpoint, dump in self.dump_by_server(hosts).items():
server_ip, server_port = server_endpoint
for line in dump.split("\n"):
mat = self.IP_PORT_REGEX.match(line)
if mat is None:
continue
ip, port, sid = mat.groups()
info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
return info_by_id
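# Hedged usage sketch (not part of zk_shell): exercises a few of the helpers
# above. The host string and the /app-a, /app-b paths are made-up examples, and
# a reachable ZooKeeper ensemble is assumed.
def _usage_sketch(hosts="localhost:2181"):
    zk = XClient(hosts=hosts)
    zk.start()
    try:
        print(zk.du("/"))                          # bytes stored under /
        for path in zk.find("/", "config", 0):     # regex match over descendant paths
            print(path)
        for rv, child in zk.diff("/app-a", "/app-b"):
            print(rv, child)                       # -1 missing in dst, 0 differs, 1 only in dst
    finally:
        zk.stop()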
|
|
#!/usr/bin/env python
import numpy as np
import sys
# import string
__all__ = ['tokenizeline', 'guesstype', 'guessarraytype']
def tokenizeline(line, delimitter="", ignorestrings="#", prependstring=None,
format='list'):
"""
splits the string line into two substrings before and after the
first instance of a string in the list ignorestrings, and returns
a tuple of a list/tuple of tokens obtained by tokenizing the first
substring on the delimiter delimitter and the second substring.
Parameters
----------
line: mandatory, string
string to tokenize
delimitter: optional, defaults to ""
string of characters (other than whitespace) to
be used as a delimiter for tokenizing the line.
for example in the case of a line of TSV, it would be "\t"
ignorestrings: string, optional, defaults to "#"
string, after which the remainder of the line will be ignored
in the list of tokens
prependstring: string, optional defaults to None
if not None, assumes desired data is prepended by prependstring, and
removes this prepended string
format: string, optional defaults to 'list'
describes the format of the collection of tokens and can be either
'list' or 'tuple'
Returns
-------
tuple: (lst, list of metadata)
lst is a list/tuple of token strings, list of metadata strings
Examples
--------
>>> myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests "
>>> tokenizeline(myline, delimitter=".")
(['KJAHS KH AKJHS jjhJH', ' JH HJ JHH JH'], ['tests'])
>>> tokenizeline(myline, delimitter="")
(['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], ['tests'])
>>> myline = "data KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests "
>>> tokenizeline(myline, delimitter="", prependstring='data')
(['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], ['tests'])
    .. note:: slightly different signature from _tokenizeline, which seemed to be \
too complicated and is done more simply here, as the metadata is captured \
as a list rather than a comment string.
TODO: allow multiple delimiter strings.
"""
dataline = line.strip()
# Find comments to ignore
commentlist = []
if ignorestrings is not None:
lst = dataline.split(ignorestrings)
commentlist = lst[1:]
dataline = lst[0]
# print 'ignorestrings ', ignorestrings
# print 'linelst ', dataline
    if prependstring is not None:
        # str.lstrip() strips a set of characters, not a prefix, so remove the
        # prepended string explicitly.
        if dataline.startswith(prependstring):
            dataline = dataline[len(prependstring):]
        dataline = dataline.strip()
if delimitter == '':
tokens = dataline.split()
# print 'tokens', tokens
else:
tokens = dataline.split(delimitter)
if format == 'tuple':
tokens = tuple(tokens)
return (tokens, commentlist)
def guesstype(s, makeintfloats=False):
"""
guess the datatype (between ints, floats, str) of the object printed
as a string and return a tuple of (dtype, data in appropriate dtype)
Parameters
----------
    s : mandatory, string
        string representation of an elemental python value whose type we
        want to guess
makeintfloats: optional, bool, defaults to False
forces integers to float (f4)
Returns
-------
tuple: (dtype, sprime)
where sprime is the data represented by the string in its
appropriate datatype.
Examples
--------
>>> s = '123'
>>> guesstype(s)
('i8', 123)
>>> guesstype(s, makeintfloats=True)
('f4', 123.0)
>>> guesstype('12.3')
('f4', 12.3)
>>> guesstype('s23')
('a20', 's23')
.. note:: We cannot use s.__class__ as we are starting from a string
"""
try:
int(s)
if makeintfloats:
return 'f4', float(s)
else:
return 'i8', int(s)
except ValueError:
pass
try:
float(s)
return 'f4', float(s)
except ValueError:
pass
return "a20", s
def guessarraytype(arr, makeintfloats=False):
"""
guess the underlying datatype (out of 'i8', 'f4', 'a20') of an iterable
of strings. If the iterable contains strings that are guessed to be of
different types, the most 'general' type will be returned, where we mean
('i8', 'f4', 'a20') are assumed to be in increasing order of generality.
Parameters
----------
iterable : mandatory, array-like object of strings
collection of strings
makeintfloats: optional, bool, defaults to False
If true, assumes that strings that can be integers are actually
floats, so that strings like '3' are treated as '3.0'
Returns
-------
One of 'i8', 'f4', 'a20'
Examples
--------
>>> arr = ['3', '2', '4']
>>> guessarraytype(arr)
'i8'
>>> arr = ['3', '2', '4']
>>> guessarraytype(arr, makeintfloats=True)
'f4'
>>> arr = ['3', '2', '4', '7.0']
>>> guessarraytype(arr, makeintfloats=False)
'f4'
>>> arr = ['3.4', '2.7', '4.0']
>>> guessarraytype(arr)
'f4'
>>> arr = ['3.4', '2.7', '4.0', 's23']
>>> guessarraytype(arr)
'a20'
"""
    typearr = np.array([guesstype(x, makeintfloats=makeintfloats)[0]
                        for x in arr])
if any(typearr == 'a20'):
return 'a20'
elif any(typearr == 'f4'):
return 'f4'
elif all(typearr == 'i8'):
return 'i8'
    else:
        raise ValueError("guesstype returned a type other than 'f4', 'i8' "
                         "or 'a20' for some element of arr")
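

# loadfile2array below calls a helper named getdatatypes that is not part of
# this excerpt. The function below is a hypothetical sketch of what such a
# helper might look like, built on guessarraytype: it inspects each column of
# a list of string rows and returns a numpy dtype specification suitable for
# np.zeros(n, dtype=...). The name, signature and default field-naming scheme
# are assumptions, not the original implementation.
def _getdatatypes_sketch(rows, keys=None, makeintfloats=False):
    ncols = len(rows[0])
    if keys is None:
        # default field names f0, f1, ... when no header keys are given
        keys = ['f%d' % i for i in range(ncols)]
    types = []
    for i in range(ncols):
        col = [row[i] for row in rows]
        types.append((str(keys[i]),
                      guessarraytype(col, makeintfloats=makeintfloats)))
    return types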
def _tokenizeline(line, delimstrings=" ", ignorestrings=["#"]):
"""
splits the string line into two substrings before and after the
first instance of a string in the list ignorestrings, and returns
a tuple of a list of tokens obtained by tokenizing the first
substring on the delimiter delimstrings and the second substring.
Parameters
----------
line: mandatory, string
string to tokenize
    delimstrings: optional, defaults to " "
        string of characters (other than whitespace) to
        be used as a delimiter for tokenizing the line.
        for example in the case of a line of TSV, it would be "\t"
    ignorestrings: optional, defaults to ["#"]
        list of strings, occurrences of any of which in a
        line indicate that the remainder of the line is a
        comment which should not be tokenized
Returns
-------
tuple: list of token strings, string of comments
Examples
--------
>>> myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests "
>>> _tokenizeline(myline, delimstrings=".")
(['KJAHS KH AKJHS jjhJH', 'JH HJ JHH JH'], '#tests')
>>> _tokenizeline(myline, delimstrings="")
(['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], '#tests')
    .. note::
        status: not currently used; trying to use tokenizeline instead
        Sat Jan 17 18:20:54 PST 2015
Tested, seems to work correctly,
testio.py
#Section: Test tokenization:
R. Biswas, July 17, 2012
Rewritten to work for multiple ignorestrings in list to fix bug,
R. Biswas, Sep 15, 2012
TODO: allow multiple delimiter strings.
"""
    tokens = []
    comments = ''
    tmp = line.strip()
if tmp:
minlengthforst = -1
actualignorestring = None
lengthofline = len(tmp)
#Find the ignore string that occurs first
for st in ignorestrings:
linelist = tmp.split(st)
lengthforst = len(linelist[0])
if lengthforst < lengthofline:
#These strings are on the line
if lengthforst < minlengthforst or -1 == minlengthforst:
actualignorestring = st
minlengthforst = lengthforst
tokstring = ""
if actualignorestring:
linelist = tmp.split(actualignorestring)
if len(linelist[1])>1:
comments = actualignorestring + actualignorestring.join(linelist[1:])
tokstring = linelist[0]
else:
tokstring = tmp
    if delimstrings == "":
        tokens = tokstring.split()
    else:
        tokens = [x.strip() for x in tokstring.split(delimstrings)]
    ret = (tokens, comments)
    return ret
def builddict(fname,
ignorestrings=['#'],
dictdelim='=',
startblock = None,
endblock =None):
"""builddict (fname) reads in the file with filename
fname, and builds a dictionary of keys vs values from
it
args:
fname: mandatory, string
filename from which the dictionary is to be built
        ignorestrings: optional, list of strings, defaults to ["#"]
            list of strings, after which the remaining part of
            the line should be ignored.
dictdelim: optional, string, defaults to '='
delimiter used to separate keys, values
in building the dictionary
        startblock: optional, string, defaults to None
            Can do a replace within only the starting and ending
            blocks but both must be provided. These blocks can
            start with a comment string
        endblock: optional, string, defaults to None
            Can do a replace within only the starting and ending
            blocks but both must be provided. These blocks can
            start with a comment string
returns:
dictionary of keys and values (in strings)
example usage :
builddict ( fname)
status:
Seems to work correctly, tested on CAMB params.ini,
R. Biswas, July 08, 2012
That was in configdict. Rewritten to use ioutilst, not tested
yet,
R. Biswas, Aug 09, 2012
"""
f = open(fname, "r")
line = f.readline()
i = 0
#print ignorestrings
paramdict={}
readin = False
    while line != '':
        if startblock:
            if not readin:
                if line.find(startblock) != -1:
                    readin = True
        else:
            readin = True
        if not readin:
            line = f.readline()
            continue
        tmp = _tokenizeline(line, ignorestrings=ignorestrings,
                            delimstrings=dictdelim)
        tmp = tmp[0]
        if len(tmp) > 1:
            key = tmp[0].strip()
            val = tmp[1].strip()
            paramdict[str(key)] = str(val)
        line = f.readline()
        if endblock and line.find(endblock) != -1:
            readin = False
            continue
    f.close()
return paramdict
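

# A minimal usage sketch for builddict: write a small parameter file of
# `key = value` lines to a temporary location and read it back. The file
# contents and parameter names below are illustrative only.
def _demo_builddict():
    import os
    import tempfile
    fd, fname = tempfile.mkstemp(suffix='.ini')
    os.close(fd)
    with open(fname, 'w') as fh:
        fh.write("# sample parameter file\n")
        fh.write("output_root = test\n")
        fh.write("ombh2 = 0.0226  # baryon density\n")
    params = builddict(fname, ignorestrings=['#'], dictdelim='=')
    os.remove(fname)
    # params == {'output_root': 'test', 'ombh2': '0.0226'}
    return params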
def loadfile2array(fname,
datastrings = [],
datadelims = "",
ignorestrings = ["#"],
ignorelines = [],
ignorecols = [] ,
usecols = [],
usecoldicts = [],
validatetable = True,
converttofloat =False,
keys = None ,
makeintfloats = False,
verbose = False,
extension =''):
"""loadfiletoarray loads a (part) of a ASCII file to a list or a
numpy array.
args:
fname: mandatory , string
name of file which is to be read
eg. "FITOPT001.FITRES
        datastrings: optional, list of strings, defaults to []
            if not an empty list, contains the strings with which
            lines holding data to be turned into the np array
            must begin.
datadelims: optional, string, defaults to ""
if equal to "" (default) the data delimiters are
assumed to be whitespace. Otherwise this string
has to be specified (eg for a CSV)
ignorelines: optional, list of integers, defaults to []
list of file linenumbers on the file which will be
ignored. These linenumbers start from 1 and match
the line numbers shown by vi
        ignorestrings: optional, list of strings, defaults to []
            if not an empty list, contains strings after which
            a line will not be read in
usecols: optional, list of integers, defaults to []
only load these cols into the array
ignorecols: optional, list of integers, defaults to []
do not load these cols into the array.
NB: It is expected that none or only one of
usecols, and ignorecols will be used
usecoldicts: optional, list of integers , defaults to []
col number of a set of strings that could be used
to identify the row
validatetable: optional, defaults to True
if True, checks that the number of elements in mylist
for each row is the same. On success it returns a return
code of 0, else a return code of 1
        converttofloat: optional, defaults to False
            if True, then it converts the Table to a numpy
            array of floats
            if False, it leaves the table as a list of strings
verbose:
optional, bool, defaults to False
if True, turns on vmode, printing out messages.
extension: optional, defaults to ""
if 'gz', uses the gzip library to open gzipped files
returns:
tuple
if converttofloat == True,
            (numpy structured 2D array, list of strings,
returncode )
else ,
(list of list of strings ,
empty list of strings , returncode)
returncode = 0 , everything checked out
= 1 , terrible failure
I PLAN TO KEEP returncode AS THE LAST ENTRY
OF THE TUPLE, R. Biswas, July 18, 2012
example usage:
(data , dictlist , returncode) =
io.loadfiletoarray("FITOPT001.FITRES",
datastrings=["SN"],
ignorecols=[0,1],
converttofloat=True,
usecoldicts = [0,1])
status:
tested using testio.py
Most features seem to work
R. Biswas, July 18, 2012
updated this routine to put in a real coldict. Have no idea
why I wrote the return values the way they were. Got rid of
dictlist and have a col. I don't see the point of having
multiple values in the dictionary
R. Biswas, Aug 11,2012
rewritten from loadfiletoarray to use a numpy structured array
R. Biswas, Mon Mar 25 00:31:47 CDT 2013
Fixed bug that arose when a column had inconsistent types, eg.
starting with int but then incorporating strings (as in
cids) by looking at the entire column.
R. Biswas, Mon Mar 25 09:06:14 CDT 2013
"""
import numpy as np
import gzip
vmode = False
if verbose :
vmode = True
if extension=="":
f = open(fname,"r")
elif extension == "gz":
f = gzip.open(fname,"rb")
    else:
        print "Don't know what this extension is"
        # return the standard (data, coldict, returncode) failure tuple
        return ([], [], 1)
line = f.readline()
linenum = 1
mylist = []
numelems = 0 #Number of elements in each row of the list
numtokens = 0
if vmode:
print "INPUTS "
print "datastrings", "usecols", "ignorecols"
print datastrings, usecols , ignorecols , "\n"
while line!="":
if verbose:
print 'iterating line loop'
tokens = []
newtoken = False
        currentline = line
        currentlinenum = linenum  # 1-based number of currentline, as in vi
        line = f.readline()
        linenum += 1
        if vmode:
            print "Linenum = ", currentlinenum
            print "corresponding line = ", currentline + "\n"
        # Leave out lines that we don't want
        if currentlinenum in ignorelines:
            if vmode:
                print "Ignoring line ", currentline, "in ignorelines ", ignorelines
            continue
if any(map(lambda x: currentline.startswith(x),ignorestrings)):
if vmode:
print "Ignoring line ", currentline, "starting with ignorestrings ", ignorestrings
continue
#If there is a datastring
        if len(datastrings) == 0:
            # tokenizeline takes a single ignore string and the `delimitter`
            # keyword; the old `delimstrings`/list arguments were leftovers
            # from _tokenizeline and raised a TypeError. Only the first
            # ignore string is honoured here.
            tokens, comments = tokenizeline(
                currentline,
                ignorestrings=ignorestrings[0] if ignorestrings else None,
                delimitter=datadelims)
            newtoken = True
            numtokens = len(tokens)
            if vmode:
                print "in line no", currentlinenum, ":", numtokens, "tokens were found"
        elif any(map(lambda x: currentline.startswith(x), datastrings)):
            tokens, comments = tokenizeline(
                currentline,
                ignorestrings=ignorestrings[0] if ignorestrings else None,
                delimitter=datadelims)
if vmode:
print "current line ", currentline + " tokenized to ", tokens
newtoken = True
numtokens = len(tokens)
else:
pass
if validatetable:
if numelems == 0:
numelems = numtokens
if numelems != numtokens:
return ([], [], 1)
if newtoken:
if vmode:
print "new tokens found of length", len(tokens)
print "These tokens are ", tokens
if len(tokens) > 0:
mylist.append(tokens)
# line = f.readline()
# print line , "\n", tokens
if verbose:
print "mylist now of length ", len(mylist)
print "mylist = ", mylist
f.close()
if vmode:
print "printing mylist[0]"
print mylist[0]
cutlist = []
dictlist = []
coldict = {}
# ##Choose Columns for list
if len(ignorecols) > 0:
usecols = [i for i in range(len(mylist[0])) if i not in ignorecols]
if vmode:
print len(mylist[0])
print len(usecols)
cutlistiter = 0
if (len(usecols) < len(mylist[0])) and (len(usecols) != 0):
for row in mylist:
cutrow = [row[i] for i in range(len(row)) if i in usecols]
cutlist.append(cutrow)
# print usecoldicts
if len(usecoldicts) > 0:
dictrow = [row[i] for i in range(len(row)) if i in usecoldicts]
dictlist.append(dictrow)
coldict[dictrow[0]] = cutlistiter
cutlistiter += 1
else:
cutlist = mylist
# ## Assuming things can be turned into floats
if converttofloat:
# # Check the data types of 1st row
types = getdatatypes(cutlist, keys=keys, makeintfloats=makeintfloats)
# print types
# print cutlist
# print len(cutlist)
cutarray = np.zeros(len(cutlist), dtype=types)
# print len(cutarray)
for i in range(len(cutlist)):
# map(float, cutlist[i])
# cutarray[i] = np.array(map(float,cutlist[i]))
# print len(cutlist)
cutarray[i] = tuple(cutlist[i])
# print i, len(cutlist[i]), len(cutarray[i])
# print cutlist[i]
# print cutarray[i]
# print "length of array ", len(cutarray)
# return (cutarray ,dictlist , 0 )
return (cutarray, coldict, 0)
# return (cutlist , dictlist , 0 )
return (cutlist, coldict, 0)
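

# A minimal usage sketch for loadfile2array: write a tiny SNANA-style file
# where data rows begin with "SN:" and load it back as a list of rows,
# dropping the leading tag column. The file contents are illustrative only.
def _demo_loadfile2array():
    import os
    import tempfile
    fd, fname = tempfile.mkstemp(suffix='.fitres')
    os.close(fd)
    with open(fname, 'w') as fh:
        fh.write("# header comment\n")
        fh.write("SN: 1001 0.12 22.5\n")
        fh.write("SN: 1002 0.30 23.1\n")
    data, coldict, returncode = loadfile2array(fname,
                                               datastrings=["SN:"],
                                               ignorecols=[0])
    os.remove(fname)
    # data == [['1001', '0.12', '22.5'], ['1002', '0.30', '23.1']]
    return data, coldict, returncode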
|
|
"""Test the Xiaomi Miio config flow."""
from unittest.mock import Mock, patch
from construct.core import ChecksumError
from micloud.micloudexception import MiCloudAccessDenied
from miio import DeviceException
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import zeroconf
from homeassistant.components.xiaomi_miio import const
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from . import TEST_MAC
from tests.common import MockConfigEntry
ZEROCONF_NAME = "name"
ZEROCONF_PROP = "properties"
ZEROCONF_MAC = "mac"
TEST_HOST = "1.2.3.4"
TEST_HOST2 = "5.6.7.8"
TEST_CLOUD_USER = "username"
TEST_CLOUD_PASS = "password"
TEST_CLOUD_COUNTRY = "cn"
TEST_TOKEN = "12345678901234567890123456789012"
TEST_NAME = "Test_Gateway"
TEST_NAME2 = "Test_Gateway_2"
TEST_MODEL = const.MODELS_GATEWAY[0]
TEST_MAC2 = "mn:op:qr:st:uv:wx"
TEST_MAC_DEVICE = "abcdefghijkl"
TEST_MAC_DEVICE2 = "mnopqrstuvwx"
TEST_GATEWAY_ID = TEST_MAC
TEST_HARDWARE_VERSION = "AB123"
TEST_FIRMWARE_VERSION = "1.2.3_456"
TEST_ZEROCONF_NAME = "lumi-gateway-v3_miio12345678._miio._udp.local."
TEST_CLOUD_DEVICES_1 = [
{
"parent_id": None,
"name": TEST_NAME,
"model": TEST_MODEL,
"localip": TEST_HOST,
"mac": TEST_MAC_DEVICE,
"token": TEST_TOKEN,
}
]
TEST_CLOUD_DEVICES_2 = [
{
"parent_id": None,
"name": TEST_NAME,
"model": TEST_MODEL,
"localip": TEST_HOST,
"mac": TEST_MAC_DEVICE,
"token": TEST_TOKEN,
},
{
"parent_id": None,
"name": TEST_NAME2,
"model": TEST_MODEL,
"localip": TEST_HOST2,
"mac": TEST_MAC_DEVICE2,
"token": TEST_TOKEN,
},
]
@pytest.fixture(name="xiaomi_miio_connect", autouse=True)
def xiaomi_miio_connect_fixture():
"""Mock denonavr connection and entry setup."""
mock_info = get_mock_info()
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
), patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.login",
return_value=True,
), patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.get_devices",
return_value=TEST_CLOUD_DEVICES_1,
), patch(
"homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
), patch(
"homeassistant.components.xiaomi_miio.async_unload_entry", return_value=True
):
yield
def get_mock_info(
model=TEST_MODEL,
mac_address=TEST_MAC,
hardware_version=TEST_HARDWARE_VERSION,
firmware_version=TEST_FIRMWARE_VERSION,
):
"""Return a mock gateway info instance."""
gateway_info = Mock()
gateway_info.model = model
gateway_info.mac_address = mac_address
gateway_info.hardware_version = hardware_version
gateway_info.firmware_version = firmware_version
return gateway_info
async def test_config_flow_step_gateway_connect_error(hass):
"""Test config flow, gateway connection error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
side_effect=DeviceException({}),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] == {"base": "cannot_connect"}
async def test_config_flow_gateway_success(hass):
"""Test a successful config flow."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_MODEL
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
}
async def test_config_flow_gateway_cloud_success(hass):
"""Test a successful config flow using cloud."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
}
async def test_config_flow_gateway_cloud_multiple_success(hass):
"""Test a successful config flow using cloud with multiple devices."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.get_devices",
return_value=TEST_CLOUD_DEVICES_2,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "form"
assert result["step_id"] == "select"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"select_device": f"{TEST_NAME2} - {TEST_MODEL}"},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME2
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
CONF_HOST: TEST_HOST2,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC2,
}
async def test_config_flow_gateway_cloud_incomplete(hass):
"""Test a failed config flow using incomplete cloud credentials."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {"base": "cloud_credentials_incomplete"}
async def test_config_flow_gateway_cloud_login_error(hass):
"""Test a failed config flow using cloud login error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.login",
return_value=False,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {"base": "cloud_login_error"}
with patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.login",
side_effect=MiCloudAccessDenied({}),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {"base": "cloud_login_error"}
async def test_config_flow_gateway_cloud_no_devices(hass):
"""Test a failed config flow using cloud with no devices."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.get_devices",
return_value=[],
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {"base": "cloud_no_devices"}
async def test_config_flow_gateway_cloud_missing_token(hass):
"""Test a failed config flow using cloud with a missing token."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
cloud_device = [
{
"parent_id": None,
"name": TEST_NAME,
"model": TEST_MODEL,
"localip": TEST_HOST,
"mac": TEST_MAC_DEVICE,
"token": None,
}
]
with patch(
"homeassistant.components.xiaomi_miio.config_flow.MiCloud.get_devices",
return_value=cloud_device,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "abort"
assert result["reason"] == "incomplete_info"
async def test_zeroconf_gateway_success(hass):
"""Test a successful zeroconf discovery of a gateway."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host=TEST_HOST,
addresses=[TEST_HOST],
hostname="mock_hostname",
name=TEST_ZEROCONF_NAME,
port=None,
properties={ZEROCONF_MAC: TEST_MAC},
type="mock_type",
),
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
}
async def test_zeroconf_unknown_device(hass):
"""Test a failed zeroconf discovery because of a unknown device."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host=TEST_HOST,
addresses=[TEST_HOST],
hostname="mock_hostname",
name="not-a-xiaomi-miio-device",
port=None,
properties={ZEROCONF_MAC: TEST_MAC},
type="mock_type",
),
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_miio"
async def test_zeroconf_no_data(hass):
"""Test a failed zeroconf discovery because of no data."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host=None,
addresses=[],
hostname="mock_hostname",
name=None,
port=None,
properties={},
type="mock_type",
),
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_miio"
async def test_zeroconf_missing_data(hass):
"""Test a failed zeroconf discovery because of missing data."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host=TEST_HOST,
addresses=[TEST_HOST],
hostname="mock_hostname",
name=TEST_ZEROCONF_NAME,
port=None,
properties={},
type="mock_type",
),
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_miio"
async def test_config_flow_step_device_connect_error(hass):
"""Test config flow, device connection error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
side_effect=DeviceException({}),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] == {"base": "cannot_connect"}
async def test_config_flow_step_unknown_device(hass):
"""Test config flow, unknown device error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
mock_info = get_mock_info(model="UNKNOWN")
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] == {"base": "unknown_device"}
async def test_import_flow_success(hass):
"""Test a successful import form yaml for a device."""
mock_info = get_mock_info(model=const.MODELS_SWITCH[0])
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_NAME: TEST_NAME, CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: const.MODELS_SWITCH[0],
const.CONF_MAC: TEST_MAC,
}
async def test_config_flow_step_device_manual_model_error(hass):
"""Test config flow, device connection error, model None."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=get_mock_info(model=None),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] == {"base": "cannot_connect"}
async def test_config_flow_step_device_manual_model_success(hass):
"""Test config flow, device connection error, manual model."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
error = DeviceException({})
error.__cause__ = ChecksumError({})
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
side_effect=error,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] == {"base": "wrong_token"}
overwrite_model = const.MODELS_VACUUM[0]
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
side_effect=DeviceException({}),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MODEL: overwrite_model},
)
assert result["type"] == "create_entry"
assert result["title"] == overwrite_model
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: overwrite_model,
const.CONF_MAC: None,
}
async def config_flow_device_success(hass, model_to_test):
"""Test a successful config flow for a device (base class)."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
mock_info = get_mock_info(model=model_to_test)
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == model_to_test
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: model_to_test,
const.CONF_MAC: TEST_MAC,
}
async def test_config_flow_generic_roborock(hass):
"""Test a successful config flow for a generic roborock vacuum."""
DUMMY_MODEL = "roborock.vacuum.dummy"
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
mock_info = get_mock_info(model=DUMMY_MODEL)
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == DUMMY_MODEL
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: DUMMY_MODEL,
const.CONF_MAC: TEST_MAC,
}
async def zeroconf_device_success(hass, zeroconf_name_to_test, model_to_test):
"""Test a successful zeroconf discovery of a device (base class)."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host=TEST_HOST,
addresses=[TEST_HOST],
hostname="mock_hostname",
name=zeroconf_name_to_test,
port=None,
properties={"poch": f"0:mac={TEST_MAC_DEVICE}\x00"},
type="mock_type",
),
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{const.CONF_MANUAL: True},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {}
mock_info = get_mock_info(model=model_to_test)
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == model_to_test
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: model_to_test,
const.CONF_MAC: TEST_MAC,
}
async def test_config_flow_plug_success(hass):
"""Test a successful config flow for a plug."""
test_plug_model = const.MODELS_SWITCH[0]
await config_flow_device_success(hass, test_plug_model)
async def test_zeroconf_plug_success(hass):
"""Test a successful zeroconf discovery of a plug."""
test_plug_model = const.MODELS_SWITCH[0]
test_zeroconf_name = const.MODELS_SWITCH[0].replace(".", "-")
await zeroconf_device_success(hass, test_zeroconf_name, test_plug_model)
async def test_config_flow_vacuum_success(hass):
"""Test a successful config flow for a vacuum."""
test_vacuum_model = const.MODELS_VACUUM[0]
await config_flow_device_success(hass, test_vacuum_model)
async def test_zeroconf_vacuum_success(hass):
"""Test a successful zeroconf discovery of a vacuum."""
test_vacuum_model = const.MODELS_VACUUM[0]
test_zeroconf_name = const.MODELS_VACUUM[0].replace(".", "-")
await zeroconf_device_success(hass, test_zeroconf_name, test_vacuum_model)
async def test_options_flow(hass):
"""Test specifying non default settings using options flow."""
config_entry = MockConfigEntry(
domain=const.DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
},
title=TEST_NAME,
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
const.CONF_CLOUD_SUBDEVICES: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
const.CONF_CLOUD_SUBDEVICES: True,
}
async def test_options_flow_incomplete(hass):
"""Test specifying incomplete settings using options flow."""
config_entry = MockConfigEntry(
domain=const.DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
},
title=TEST_NAME,
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
const.CONF_CLOUD_SUBDEVICES: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
assert result["errors"] == {"base": "cloud_credentials_incomplete"}
async def test_reauth(hass):
"""Test a reauth flow."""
config_entry = MockConfigEntry(
domain=const.DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={
const.CONF_CLOUD_USERNAME: None,
const.CONF_CLOUD_PASSWORD: None,
const.CONF_CLOUD_COUNTRY: None,
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
},
title=TEST_NAME,
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_REAUTH},
data=config_entry.data,
)
assert result["type"] == "form"
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result["type"] == "form"
assert result["step_id"] == "cloud"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
},
)
assert result["type"] == "abort"
assert result["reason"] == "reauth_successful"
config_data = config_entry.data.copy()
assert config_data == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
const.CONF_CLOUD_USERNAME: TEST_CLOUD_USER,
const.CONF_CLOUD_PASSWORD: TEST_CLOUD_PASS,
const.CONF_CLOUD_COUNTRY: TEST_CLOUD_COUNTRY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
}
|
|
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify
from flask_basicauth import BasicAuth  # flask.ext.* was removed in Flask 1.0
from .database import AppDatabase
from .storage import InstanceStorage, InstanceNotFound
from .models import canonicalize_db_name
import plans
app = Flask('postgresapi')
app.config.from_pyfile('application.cfg')
basic_auth = BasicAuth(app)
app.config['BASIC_AUTH_FORCE'] = True
AppDatabase(app)
@app.errorhandler(500)
def internal_server_error(e):
if e.args:
return e.args[-1], 500
else:
return 'Unknown internal server error', 500
@app.route("/plans", methods=["GET"])
def list_plans():
"""List the available plans
$ tsuru service-info postgres
"""
return jsonify(plans.list_active()), 200
@app.route("/resources", methods=["POST"])
def create_instance():
"""create a new database
$ tsuru service-add postgres postgres_instance
Possible HTTP status codes:
* 201: database is successfully created
* 400: bad request, check your query
* 500: creation process is failed
"""
if 'name' not in request.form:
return 'Parameter `name` is missing', 400
name = request.form['name']
if not name:
return 'Parameter `name` is empty', 400
plan = 'shared'
if 'plan' in request.form:
plan = request.form['plan']
plans.get_manager_by_plan(plan).create_instance(canonicalize_db_name(name))
return '', 201
@app.route("/resources/<name>/bind-app", methods=["POST"])
def bind_app(name):
"""Bind an app to the database
$ tsuru bind postgres_instance --app my_app
Possible HTTP status codes:
* 201: database user is successfully created, with these environment
variables are returned:
- PG_HOST
- PG_PORT
- PG_DATABASE
- PG_USER
- PG_PASSWORD
* 400: bad request, check your query
* 404: database does not exist
* 412: database is not ready
* 500: user creation process is failed
"""
name = canonicalize_db_name(name)
if 'app-host' not in request.form:
return 'Parameter `app-host` is missing', 400
hostname = request.form['app-host']
if not hostname:
return 'Parameter `app-host` is empty', 400
try:
storage = InstanceStorage()
instance = storage.instance_by_name(name)
except InstanceNotFound:
return 'Instance `%s` is not found' % name, 404
if instance.state != 'running':
return 'Can\'t bind to this instance because it\'s not running', 412
username, password = instance.create_user(hostname)
config = {
'PG_HOST': instance.get_public_host(),
'PG_PORT': str(instance.get_port()),
'PG_DATABASE': instance.name,
'PG_USER': username,
'PG_PASSWORD': password
}
if instance.plan == 'dedicated':
config['PG_ADMIN_USER'] = instance.username
config['PG_ADMIN_PASSWORD'] = instance.password
return jsonify(config), 201
@app.route("/resources/<name>/bind", methods=["POST"])
def bind_unit(name):
"""Bind an app unit to the database
"""
name = canonicalize_db_name(name)
storage = InstanceStorage()
if storage.instance_exists(name):
return jsonify({}), 201
else:
return 'Instance `%s` is not found' % name, 404
@app.route("/resources/<name>/bind", methods=["DELETE"])
def unbind_unit(name):
"""Unbind an app unit to the database
"""
name = canonicalize_db_name(name)
storage = InstanceStorage()
if storage.instance_exists(name):
return jsonify({}), 200
else:
return 'Instance `%s` is not found' % name, 404
@app.route("/resources/<name>/bind-app", methods=["DELETE"])
def unbind_app(name):
"""Unbind an app user from the database
$ tsuru unbind postgres_instance --app my_app
Possible HTTP status codes:
* 200: database user is successfully dropped or does not exist
* 404: database does not exist
* 500: user dropping process is failed
"""
name = canonicalize_db_name(name)
if 'app-host' not in request.form:
return 'Parameter `app-host` is missing', 400
hostname = request.form['app-host']
if not hostname:
return 'Parameter `app-host` is empty', 400
try:
storage = InstanceStorage()
instance = storage.instance_by_name(name)
except InstanceNotFound:
return 'Instance `%s` is not found' % name, 404
if instance.state != 'running':
        return 'Can\'t unbind from this instance because it\'s not running', 500
instance.drop_user(hostname)
return '', 200
@app.route("/resources/<name>", methods=["DELETE"])
def destroy_instance(name):
"""Destroy an database
$ tsuru service-remove postgres_instance
Possible HTTP status codes:
* 200: database is successfully dropped
* 404: database does not exist
* 500: dropping process is failed
"""
name = canonicalize_db_name(name)
try:
storage = InstanceStorage()
instance = storage.instance_by_name(name)
manager = plans.get_manager_by_instance(instance)
manager.delete_instance(instance)
except InstanceNotFound:
return 'Can\'t drop `%s` because it doesn\'t exist' % name, 404
return '', 200
@app.route("/resources/<name>/status", methods=["GET"])
def status(name):
"""Check instance status
$ tsuru service-status postgres_instance
    Possible HTTP status codes:
    * 202: database is pending
    * 204: database is running and ready for connections
    * 404: database does not exist
    * 500: database is stopped for some reason
"""
name = canonicalize_db_name(name)
try:
storage = InstanceStorage()
instance = storage.instance_by_name(name)
except InstanceNotFound:
return 'Instance `%s` is not found' % name, 404
if instance.state == 'pending':
return instance.state, 202
elif instance.is_up():
return '', 204
return '', 500
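

# A minimal client sketch for exercising this API from Python, assuming the
# service is reachable at http://localhost:8888 and protected by the basic
# auth credentials configured in application.cfg; the host, port and
# credentials below are illustrative assumptions.
def _demo_client():
    import requests
    base = 'http://localhost:8888'
    auth = ('tsuru', 'secret')
    # create an instance, then bind an app to it
    r = requests.post(base + '/resources', data={'name': 'mydb'}, auth=auth)
    print(r.status_code)  # expect 201 on success
    r = requests.post(base + '/resources/mydb/bind-app',
                      data={'app-host': 'myapp.example.com'}, auth=auth)
    print(r.status_code)  # expect 201 on success
    print(r.json())       # the PG_* connection variables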
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._sign_up_settings_operations import build_create_or_update_request, build_get_entity_tag_request, build_get_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SignUpSettingsOperations:
"""SignUpSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~api_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_entity_tag(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> bool:
"""Gets the entity state (Etag) version of the SignUpSettings.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_entity_tag_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
template_url=self.get_entity_tag.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> "_models.PortalSignupSettings":
"""Get Sign Up Settings for the Portal.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PortalSignupSettings, or the result of cls(response)
:rtype: ~api_management_client.models.PortalSignupSettings
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PortalSignupSettings"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PortalSignupSettings', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
service_name: str,
if_match: str,
parameters: "_models.PortalSignupSettings",
**kwargs: Any
) -> None:
"""Update Sign-Up settings.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update Sign-Up settings.
:type parameters: ~api_management_client.models.PortalSignupSettings
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PortalSignupSettings')
request = build_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
if_match=if_match,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
service_name: str,
parameters: "_models.PortalSignupSettings",
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.PortalSignupSettings":
"""Create or Update Sign-Up settings.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param parameters: Create or update parameters.
:type parameters: ~api_management_client.models.PortalSignupSettings
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PortalSignupSettings, or the result of cls(response)
:rtype: ~api_management_client.models.PortalSignupSettings
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PortalSignupSettings"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PortalSignupSettings')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PortalSignupSettings', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import random, time
from PIL import Image, ImageTk
from Tkinter import Tk, Frame, Canvas, ALL, NW
WIDTH = 300
HEIGHT = 300
DELAY = 100
DOT_SIZE = 10
ALL_DOTS = WIDTH * HEIGHT / (DOT_SIZE * DOT_SIZE)
RAND_POS = 27
RESIZE = 5, 5
x = [0] * ALL_DOTS
y = [0] * ALL_DOTS
class Board(Canvas):
def __init__(self, parent):
Canvas.__init__(self, width=WIDTH, height=HEIGHT,
background="black", highlightthickness=0)
self.parent = parent
self.initGame()
self.pack()
def initGame(self):
self.left = False
self.right = True
self.up = False
self.down = False
self.inGame = True
self.dots = 3
self.apple_x = 100
self.apple_y = 190
for i in range(self.dots):
x[i] = 50 - i * 10
y[i] = 50
try:
self.idot = Image.open("dot.png")
self.idot.thumbnail(RESIZE)
self.dot = ImageTk.PhotoImage(self.idot)
self.ihead = Image.open("head.png")
self.ihead.thumbnail(RESIZE)
self.head = ImageTk.PhotoImage(self.ihead)
self.iapple = Image.open("apple.png")
self.iapple.thumbnail(RESIZE)
self.apple = ImageTk.PhotoImage(self.iapple)
except IOError, e:
print e
sys.exit(1)
self.focus_get()
self.createObjects()
self.locateApple()
self.bind_all("<Key>", self.onKeyPressed)
self.after(DELAY, self.onTimer)
def createObjects(self):
self.create_image(self.apple_x, self.apple_y, image=self.apple,
anchor=NW, tag="apple")
self.create_image(50, 50, image=self.head, anchor=NW, tag="head")
self.create_image(30, 50, image=self.dot, anchor=NW, tag="dot")
self.create_image(40, 50, image=self.dot, anchor=NW, tag="dot")
def checkApple(self):
apple = self.find_withtag("apple")
head = self.find_withtag("head")
x1, y1, x2, y2 = self.bbox(head)
overlap = self.find_overlapping(x1, y1, x2, y2)
for ovr in overlap:
if apple[0] == ovr:
x, y = self.coords(apple)
self.create_image(x, y, image=self.dot, anchor=NW, tag="dot")
self.locateApple()
def doMove(self):
dots = self.find_withtag("dot")
head = self.find_withtag("head")
items = dots + head
z = 0
while z < len(items)-1:
c1 = self.coords(items[z])
c2 = self.coords(items[z+1])
self.move(items[z], c2[0]-c1[0], c2[1]-c1[1])
z += 1
time.sleep(0.05)
if self.left:
self.move(head, -DOT_SIZE, 0)
if self.right:
self.move(head, DOT_SIZE, 0)
if self.up:
self.move(head, 0, -DOT_SIZE)
if self.down:
self.move(head, 0, DOT_SIZE)
def checkCollisions(self):
dots = self.find_withtag("dot")
head = self.find_withtag("head")
x1, y1, x2, y2 = self.bbox(head)
overlap = self.find_overlapping(x1, y1, x2, y2)
for dot in dots:
for over in overlap:
if over == dot:
self.inGame = False
if x1 < 0:
self.inGame = False
if x1 > WIDTH - DOT_SIZE:
self.inGame = False
if y1 < 0:
self.inGame = False
if y1 > HEIGHT - DOT_SIZE:
self.inGame = False
def locateApple(self):
apple = self.find_withtag("apple")
self.delete(apple[0])
r = random.randint(0, RAND_POS)
self.apple_x = r * DOT_SIZE
r = random.randint(0, RAND_POS)
self.apple_y = r * DOT_SIZE
self.create_image(self.apple_x, self.apple_y, anchor=NW,
image=self.apple, tag="apple")
def onKeyPressed(self, e):
key = e.keysym
if key == "Left" and not self.right:
self.left = True
self.up = False
self.down = False
if key == "Right" and not self.left:
self.right = True
self.up = False
self.down = False
if key == "Up" and not self.down:
self.up = True
self.right = False
self.left = False
if key == "Down" and not self.up:
self.down = True
self.right = False
self.left = False
def onTimer(self):
if self.inGame:
self.checkCollisions()
self.checkApple()
self.doMove()
self.after(DELAY, self.onTimer)
else:
self.gameOver()
def gameOver(self):
self.delete(ALL)
self.create_text(self.winfo_width()/2, self.winfo_height()/2,
text="Game Over", fill="white")
class Nibbles(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
parent.title('Nibbles')
self.board = Board(parent)
self.pack()
def main():
root = Tk()
nib = Nibbles(root)
root.mainloop()
if __name__ == '__main__':
main()
|
|
import falcon
import bcrypt
import base64
from db import User, Session
from sqlalchemy import exc
import smtplib
from email.mime.text import MIMEText
from random import randint
import datetime
def unauthorized_user(req):
req.context['user'] = None
description = "User was not found in database"
title = "Unauthorized Access"
raise falcon.HTTPUnauthorized(title=title, description=description)
def get_user(req, resp):
if 'user' in req.context:
user = Session.query(User).get(req.context['user'])
if user is None:
unauthorized_user(req)
if 'doc' in req.context:
doc = req.context['doc']
if 'email' in doc:
user = Session.query(User).filter_by(email=doc['email']).first()
if user is None:
unauthorized_user(req)
if 'password' in doc:
if not user.check_password(doc['password']):
unauthorized_user(req)
return user
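# Example of the request body get_user() accepts when authenticating by
# credentials (field names taken from the lookups above; values are
# illustrative only):
#
#     {"email": "user@example.com", "password": "secret"}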
class Item(object):
def on_get(self, req, resp, item_id):
user = get_user(req, resp)
if user.is_admin():
user = Session.query(User).get(item_id)
signins_json = []
for signin in user.signins:
date = signin.date_in.strftime("%Y-%m-%d")
signins_json.append({'id': signin.id, 'date': date})
req.context['result'] = {
'action': 'get signins',
'result': 'success',
'signins': signins_json
}
resp.status = falcon.HTTP_200
else:
description = "User is not administrator and can not access this information"
title = "User unauthorized"
raise falcon.HTTPUnauthorized(title=title, description=description)
    def on_post(self, req, resp, item_id):
        user = get_user(req, resp)
        if user.is_admin():
            user = Session.query(User).get(item_id)
            user.scheduleverified = True
            user.finalschedule = user.schedule
            Session.commit()
            resp.status = falcon.HTTP_200
            req.context['result'] = {'action': 'verify_schedule', 'result': 'success'}
else:
description = "User is not administrator and can not change this information"
title = "User unauthorized"
raise falcon.HTTPUnauthorized(title=title, description=description)
class Confirm(object):
def on_get(self,req,resp,code):
users=Session.query(User).all()
for user in users:
if str(user.code)==str(code):
user.emailVerified=True
resp.body="SUCCESS"
Session.commit()
class Register(object):
def on_post(self, req, resp):
doc = req.context['doc']
users=Session.query(User).all()
unique=True
for user in users:
if doc['email'] == user.email:
unique=False
if unique:
user = User(name=doc['name'], email=doc['email'].lower(), signedin=False,registerTime=datetime.datetime.today())
print(datetime.datetime.today())
user.salt = bcrypt.gensalt()
user.pw_hash = bcrypt.hashpw(doc['password'].encode('utf-8'), user.salt)
s=smtplib.SMTP_SSL('smtp.gmail.com',465)
s.ehlo()
s.login('phoenixnownoreply@gmail.com','helloworld@ppclub')
code=randint(1000000,10000000)
user.code=code
msg=MIMEText('Hi '+user.name+', your verification URL is: '+'http://192.168.1.127:8000/confirmation/'+str(code))
msg['From']='phoenixnownoreply@gmail.com'
msg['To']=user.email
msg['Subject']='PhoenixNow Account Confirmation'
s.send_message(msg)
s.close()
Session.add(user)
Session.flush()
Session.commit()
req.context['user'] = user.id
req.context['result'] = {"result": "success", "action": "register"}
else:
user=get_user(req,resp)
td=datetime.timedelta(minutes=30)
if datetime.datetime.today()-td<user.registerTime or user.emailVerified==True:
description = "User was already made"
title = "User creation conflict"
raise falcon.HTTPConflict(title=title, description=description)
else:
Session.delete(user)
Session.flush()
user = User(name=doc['name'], email=doc['email'], signedin=False,registerTime=datetime.datetime.today())
print(datetime.datetime.today())
user.salt = bcrypt.gensalt()
user.pw_hash = bcrypt.hashpw(doc['password'].encode('utf-8'), user.salt)
s=smtplib.SMTP_SSL('smtp.gmail.com',465)
s.ehlo()
s.login('phoenixnownoreply@gmail.com','helloworld@ppclub')
code=randint(1000000,10000000)
user.code=code
msg=MIMEText('Hi '+user.name+', your verification URL is: '+'http://192.168.1.127:8000/confirmation/'+str(code))
msg['From']='phoenixnownoreply@gmail.com'
msg['To']=user.email
msg['Subject']='PhoenixNow Account Confirmation'
s.send_message(msg)
s.close()
Session.add(user)
Session.flush()
Session.commit()
req.context['user'] = user.id
req.context['result'] = {"result": "success", "action": "register"}
class Collection(object):
def on_get(self, req, resp):
user = get_user(req, resp)
if user.is_admin():
json_users = []
users = Session.query(User).all()
for user in users:
json_user = {
"id": user.id,
"name": user.name,
"email": user.email,
"signedin": str(user.signedin),
"schedule":user.schedule,
"scheduleverified":str(user.scheduleverified),
"finalschedule":user.finalschedule
}
json_users.append(json_user)
req.context['result'] = {"users": json_users}
else:
description = "User is not administrator and can not access this information"
title = "User unauthorized"
raise falcon.HTTPUnauthorized(title=title, description=description)
class Login(object):
def on_post(self, req, resp):
user = get_user(req, resp)
        if user.emailVerified:
            req.context['user'] = user.id
            req.context['result'] = {"result": "success", "action": "login"}
            resp.status = falcon.HTTP_200
        else:
            raise falcon.HTTPUnauthorized('Authentication required',
                                          'User unverified',
                                          href='http://docs.example.com/auth',
                                          scheme='Token; UUID')
class Logout(object):
def on_get(self, req, resp):
req.context['user'] = None
resp.status = falcon.HTTP_200
req.context['result'] = {"result": "success", "action": "logout"}
class Schedule(object):
    def on_post(self, req, resp):
        doc = req.context['doc']
        m = doc['M']
        t = doc['T']
        w = doc['W']
        r = doc['R']
        f = doc['F']
        user = get_user(req, resp)
        user.schedule = ""
        if m == "true":
            user.schedule = user.schedule + "M"
        if t == "true":
            user.schedule = user.schedule + "T"
        if w == "true":
            user.schedule = user.schedule + "W"
        if r == "true":
            user.schedule = user.schedule + "R"
        if f == "true":
            user.schedule = user.schedule + "F"
        Session.commit()
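    # Worked example of the schedule string built above (illustrative input
    # only): a request body of {"M": "true", "T": "false", "W": "true",
    # "R": "false", "F": "true"} leaves user.schedule set to "MWF".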
class GetSchedule(object):
    def on_post(self, req, resp):
        user = get_user(req, resp)
        req.context['result'] = {"VerifiedSchedule": user.finalschedule, "SubmittedSchedule": user.schedule}
        resp.status = falcon.HTTP_200
|
|
# vim: set fileencoding=utf-8:
#
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
#
# Copyright (c) 2016-2021 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2019 Ben Nuttall <ben@bennuttall.com>
# Copyright (c) 2016-2019 Andrew Scheller <github@loowis.durge.org>
# Copyright (c) 2018 Philippe Muller <philippe.muller@gmail.com>
#
# SPDX-License-Identifier: BSD-3-Clause
import sys
import pytest
import warnings
from time import sleep
from threading import Event
from functools import partial
from unittest import mock
from conftest import ThreadedTest
from gpiozero.pins.mock import MockChargingPin, MockTriggerPin
from gpiozero.threads import GPIOThread
from gpiozero import *
def test_input_initial_values(mock_factory):
pin = mock_factory.pin(4)
with InputDevice(4, pull_up=True) as device:
assert repr(device).startswith('<gpiozero.InputDevice object')
assert pin.function == 'input'
assert pin.pull == 'up'
assert device.pull_up
assert repr(device) == '<gpiozero.InputDevice object closed>'
with InputDevice(4, pull_up=False) as device:
assert pin.pull == 'down'
assert not device.pull_up
def test_input_is_active_low(mock_factory):
pin = mock_factory.pin(2)
with InputDevice(2, pull_up=True) as device:
pin.drive_high()
assert not device.is_active
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO2, pull_up=True, is_active=False>'
pin.drive_low()
assert device.is_active
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO2, pull_up=True, is_active=True>'
def test_input_is_active_high(mock_factory):
pin = mock_factory.pin(4)
with InputDevice(4, pull_up=False) as device:
pin.drive_high()
assert device.is_active
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO4, pull_up=False, is_active=True>'
pin.drive_low()
assert not device.is_active
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO4, pull_up=False, is_active=False>'
def test_input_pulled_up(mock_factory):
pin = mock_factory.pin(2)
with pytest.raises(PinFixedPull):
InputDevice(2, pull_up=False)
def test_input_is_active_low_externally_pulled_up(mock_factory):
pin = mock_factory.pin(4)
device = InputDevice(4, pull_up=None, active_state=False)
pin.drive_high()
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO4, pull_up=None, is_active=False>'
assert not device.is_active
pin.drive_low()
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO4, pull_up=None, is_active=True>'
assert device.is_active
def test_input_is_active_high_externally_pulled_down(mock_factory):
pin = mock_factory.pin(4)
device = InputDevice(4, pull_up=None, active_state=True)
pin.drive_high()
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO4, pull_up=None, is_active=True>'
assert device.is_active
pin.drive_low()
assert repr(device) == '<gpiozero.InputDevice object on pin GPIO4, pull_up=None, is_active=False>'
assert not device.is_active
def test_input_invalid_pull_up(mock_factory):
with pytest.raises(PinInvalidState) as exc:
InputDevice(4, pull_up=None)
assert str(exc.value) == 'Pin 4 is defined as floating, but "active_state" is not defined'
def test_input_invalid_active_state(mock_factory):
with pytest.raises(PinInvalidState) as exc:
InputDevice(4, active_state=True)
assert str(exc.value) == 'Pin 4 is not floating, but "active_state" is not None'
def test_input_event_activated(mock_factory):
event = Event()
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as device:
assert repr(device).startswith('<gpiozero.DigitalInputDevice object')
device.when_activated = lambda: event.set()
assert not event.is_set()
pin.drive_high()
assert event.is_set()
def test_input_event_deactivated(mock_factory):
event = Event()
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as device:
device.when_deactivated = lambda: event.set()
assert not event.is_set()
pin.drive_high()
assert not event.is_set()
pin.drive_low()
assert event.is_set()
def test_input_activated_callback_warning(mock_factory):
def foo(): pass
with DigitalInputDevice(4) as device:
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
device.when_activated = foo()
assert len(w) == 1
assert w[0].category == CallbackSetToNone
with DigitalInputDevice(4) as device:
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
device.when_deactivated = foo()
assert len(w) == 1
assert w[0].category == CallbackSetToNone
def test_input_partial_callback(mock_factory):
event = Event()
pin = mock_factory.pin(4)
def foo(a, b):
event.set()
return a + b
bar = partial(foo, 1)
baz = partial(bar, 2)
with DigitalInputDevice(4) as device:
device.when_activated = baz
assert not event.is_set()
pin.drive_high()
assert event.is_set()
def test_input_wait_active(mock_factory):
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as device:
pin.drive_high()
assert device.wait_for_active(1)
assert not device.wait_for_inactive(0)
def test_input_wait_inactive(mock_factory):
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as device:
assert device.wait_for_inactive(1)
assert not device.wait_for_active(0)
def test_input_init_fail(mock_factory):
with pytest.raises(ValueError):
DigitalInputDevice(4, bounce_time='foo')
with pytest.raises(ValueError):
SmoothedInputDevice(4, threshold='foo')
def test_input_smoothed_attrib(mock_factory):
pin = mock_factory.pin(4)
with SmoothedInputDevice(4, threshold=0.5, queue_len=5, partial=False) as device:
assert repr(device) == '<gpiozero.SmoothedInputDevice object on pin GPIO4, pull_up=False>'
assert device.threshold == 0.5
assert device.queue_len == 5
assert not device.partial
device._queue.start()
assert not device.is_active
assert repr(device) == '<gpiozero.SmoothedInputDevice object on pin GPIO4, pull_up=False, is_active=False>'
with pytest.raises(InputDeviceError):
device.threshold = 1
assert repr(device) == '<gpiozero.SmoothedInputDevice object closed>'
with pytest.raises(BadQueueLen):
SmoothedInputDevice(4, queue_len=-1)
with pytest.raises(BadWaitTime):
SmoothedInputDevice(4, sample_wait=-1)
def test_input_smoothed_values(mock_factory):
pin = mock_factory.pin(4)
with SmoothedInputDevice(4) as device:
device._queue.start()
assert not device.is_active
pin.drive_high()
assert device.wait_for_active(1)
pin.drive_low()
assert device.wait_for_inactive(1)
def test_input_button(mock_factory):
pin = mock_factory.pin(2)
with Button(2) as button:
assert repr(button).startswith('<gpiozero.Button object')
assert pin.pull == 'up'
assert not button.is_pressed
pin.drive_low()
assert button.is_pressed
assert button.wait_for_press(1)
pin.drive_high()
assert not button.is_pressed
assert button.wait_for_release(1)
def test_input_button_hold(mock_factory):
pin = mock_factory.pin(2)
evt = Event()
evt2 = Event()
with Button(2) as button:
with pytest.raises(ValueError):
button.hold_time = -1
button.hold_time = 0.1
assert button.hold_time == 0.1
assert not button.hold_repeat
assert button.when_held is None
button.when_held = evt.set
assert button.when_held is not None
pin.drive_low()
assert evt.wait(1)
assert button.is_held
assert button.held_time >= 0.0
pin.drive_high()
evt.clear()
assert button.held_time is None
assert not button.is_held
button.hold_repeat = True
pin.drive_low()
assert evt.wait(1)
evt.clear()
assert evt.wait(1)
pin.drive_high()
evt.clear()
assert not evt.wait(0.1)
def test_input_line_sensor(mock_factory):
pin = mock_factory.pin(4)
with LineSensor(4) as sensor:
assert repr(sensor).startswith('<gpiozero.LineSensor object')
pin.drive_low() # logic is inverted for line sensor
assert sensor.wait_for_line(1)
assert sensor.line_detected
pin.drive_high()
assert sensor.wait_for_no_line(1)
assert not sensor.line_detected
def test_input_motion_sensor(mock_factory):
pin = mock_factory.pin(4)
with MotionSensor(4) as sensor:
assert repr(sensor).startswith('<gpiozero.MotionSensor object')
pin.drive_high()
assert sensor.wait_for_motion(1)
assert sensor.motion_detected
pin.drive_low()
assert sensor.wait_for_no_motion(1)
assert not sensor.motion_detected
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_input_light_sensor(mock_factory):
pin = mock_factory.pin(4, pin_class=MockChargingPin)
assert isinstance(pin, MockChargingPin)
with LightSensor(4) as sensor:
assert repr(sensor).startswith('<gpiozero.LightSensor object')
pin.charge_time = 0.1
assert sensor.wait_for_dark(1)
pin.charge_time = 0.0
assert sensor.wait_for_light(1)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_input_distance_sensor(mock_factory):
echo_pin = mock_factory.pin(4)
trig_pin = mock_factory.pin(5, pin_class=MockTriggerPin,
echo_pin=echo_pin, echo_time=0.02)
with pytest.raises(ValueError):
DistanceSensor(4, 5, max_distance=-1)
# normal queue len is large (because the sensor is *really* jittery) but
# we want quick tests and we've got precisely controlled pins :)
with DistanceSensor(4, 5, queue_len=5, max_distance=1) as sensor:
assert repr(sensor).startswith('<gpiozero.DistanceSensor object')
assert sensor.max_distance == 1
assert sensor.trigger is trig_pin
assert sensor.echo is echo_pin
assert sensor.wait_for_out_of_range(1)
assert not sensor.in_range
# should be waay before max-distance so this should work
assert sensor.distance == 1.0
trig_pin.echo_time = 0.0
assert sensor.wait_for_in_range(1)
assert sensor.in_range
# depending on speed of machine, may not reach 0 here
assert sensor.distance < sensor.threshold_distance
sensor.threshold_distance = 0.1
assert sensor.threshold_distance == 0.1
with pytest.raises(ValueError):
sensor.max_distance = -1
sensor.max_distance = 20
assert sensor.max_distance == 20
assert sensor.threshold_distance == 0.1
def test_input_distance_sensor_edge_cases(mock_factory):
echo_pin = mock_factory.pin(4)
trig_pin = mock_factory.pin(5) # note: normal pin
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
with DistanceSensor(4, 5, queue_len=5, max_distance=1, partial=True) as sensor:
# Test we get a warning about the echo pin being set high
echo_pin.drive_high()
sleep(0.5)
assert sensor.value == 0
# Test we get a warning about receiving no echo
echo_pin.drive_low()
sleep(0.5)
for rec in w:
if str(rec.message) == 'echo pin set high':
break
else:
assert False
for rec in w:
if str(rec.message) == 'no echo received':
break
else:
assert False
def rotate_cw(a_pin, b_pin):
a_pin.drive_low()
b_pin.drive_low()
a_pin.drive_high()
b_pin.drive_high()
def rotate_ccw(a_pin, b_pin):
b_pin.drive_low()
a_pin.drive_low()
b_pin.drive_high()
a_pin.drive_high()
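# The two helpers above walk the mock A/B pins through one full quadrature
# cycle (both low, then both high, in opposite orders for the two directions),
# which the tests below rely on counting as exactly one encoder step.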
def test_input_rotary_encoder(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with pytest.raises(ValueError):
RotaryEncoder(20, 21, threshold_steps=(2, 0))
with RotaryEncoder(20, 21) as encoder:
assert repr(encoder).startswith('<gpiozero.RotaryEncoder object')
assert encoder.steps == 0
assert encoder.value == 0
assert not encoder.wrap
a_pin.drive_low()
b_pin.drive_low()
# Make sure we don't erroneously jump before the end of the sequence
assert encoder.steps == 0
a_pin.drive_high()
b_pin.drive_high()
assert encoder.steps == 1
# Make sure the sequence works in both directions
rotate_ccw(a_pin, b_pin)
assert encoder.steps == 0
assert repr(encoder) == '<gpiozero.RotaryEncoder object closed>'
def test_input_rotary_encoder_jiggle(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21) as encoder:
# Check the FSM permits "jiggle" in the sequence
a_pin.drive_low()
a_pin.drive_high()
a_pin.drive_low()
b_pin.drive_low()
b_pin.drive_high()
b_pin.drive_low()
a_pin.drive_high()
a_pin.drive_low()
a_pin.drive_high()
b_pin.drive_high()
assert encoder.steps == 1
def test_input_rotary_encoder_limits(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21, max_steps=4) as encoder:
assert not encoder.wrap
for expected in [1, 2, 3, 4, 4, 4]:
rotate_cw(a_pin, b_pin)
assert encoder.steps == expected
def test_input_rotary_encoder_threshold(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21, max_steps=4, threshold_steps=(2, 4)) as encoder:
assert encoder.threshold_steps == (2, 4)
for expected in [1, 2, 3, 4]:
rotate_cw(a_pin, b_pin)
assert encoder.is_active == (2 <= encoder.steps <= 4)
def test_input_rotary_encoder_settable(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21, max_steps=4) as encoder:
assert encoder.max_steps == 4
assert encoder.steps == 0
assert encoder.value == 0
rotate_cw(a_pin, b_pin)
assert encoder.steps == 1
assert encoder.value == 0.25
encoder.steps = 0
assert encoder.steps == 0
assert encoder.value == 0
rotate_ccw(a_pin, b_pin)
assert encoder.steps == -1
assert encoder.value == -0.25
encoder.value = 0
assert encoder.steps == 0
assert encoder.value == 0
with RotaryEncoder(20, 21, max_steps=0) as encoder:
assert encoder.max_steps == 0
assert encoder.steps == 0
assert encoder.value == 0
rotate_cw(a_pin, b_pin)
assert encoder.steps == 1
# value is perpetually 0 when max_steps is 0
assert encoder.value == 0
encoder.steps = 0
assert encoder.steps == 0
assert encoder.value == 0
def test_input_rotary_encoder_wrap(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21, max_steps=4, wrap=True) as encoder:
assert encoder.wrap
for expected in [1, 2, 3, 4, -4, -3, -2, -1, 0]:
rotate_cw(a_pin, b_pin)
assert encoder.steps == expected
def test_input_rotary_encoder_when(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21) as encoder:
rotated = Event()
rotated_cw = Event()
rotated_ccw = Event()
assert encoder.when_rotated is None
assert encoder.when_rotated_clockwise is None
assert encoder.when_rotated_counter_clockwise is None
encoder.when_rotated = rotated.set
encoder.when_rotated_clockwise = rotated_cw.set
encoder.when_rotated_counter_clockwise = rotated_ccw.set
assert encoder.when_rotated is not None
assert encoder.when_rotated_clockwise is not None
assert encoder.when_rotated_counter_clockwise is not None
assert callable(encoder.when_rotated)
assert callable(encoder.when_rotated_clockwise)
assert callable(encoder.when_rotated_counter_clockwise)
assert not rotated.wait(0)
assert not rotated_cw.wait(0)
assert not rotated_ccw.wait(0)
rotate_cw(a_pin, b_pin)
assert rotated.wait(0)
assert rotated_cw.wait(0)
assert not rotated_ccw.wait(0)
rotated.clear()
rotated_cw.clear()
rotate_ccw(a_pin, b_pin)
assert rotated.wait(0)
assert not rotated_cw.wait(0)
assert rotated_ccw.wait(0)
def test_input_rotary_encoder_wait(mock_factory):
a_pin = mock_factory.pin(20)
b_pin = mock_factory.pin(21)
with RotaryEncoder(20, 21) as encoder:
# The rotary encoder waits are "pulsed", i.e. they act like edge waits
# rather than level waits hence the need for a background thread here
# that actively attempts to wait on rotation while it happens. It's
# not enough to rotate and *then* attempt to wait
test_rotate = ThreadedTest(lambda: encoder.wait_for_rotate(0))
test_rotate_cw = ThreadedTest(lambda: encoder.wait_for_rotate_clockwise(0))
test_rotate_ccw = ThreadedTest(lambda: encoder.wait_for_rotate_counter_clockwise(0))
assert not test_rotate.result
assert not test_rotate_cw.result
assert not test_rotate_ccw.result
test_thread = ThreadedTest(lambda: encoder.wait_for_rotate(1))
test_thread_cw = ThreadedTest(lambda: encoder.wait_for_rotate_clockwise(1))
test_thread_ccw = ThreadedTest(lambda: encoder.wait_for_rotate_counter_clockwise(1))
rotate_cw(a_pin, b_pin)
assert test_thread.result
assert test_thread_cw.result
assert not test_thread_ccw.result
test_rotate = ThreadedTest(lambda: encoder.wait_for_rotate(0))
test_rotate_cw = ThreadedTest(lambda: encoder.wait_for_rotate_clockwise(0))
test_rotate_ccw = ThreadedTest(lambda: encoder.wait_for_rotate_counter_clockwise(0))
assert not test_rotate.result
assert not test_rotate_cw.result
assert not test_rotate_ccw.result
test_thread = ThreadedTest(lambda: encoder.wait_for_rotate(1))
test_thread_cw = ThreadedTest(lambda: encoder.wait_for_rotate_clockwise(1))
test_thread_ccw = ThreadedTest(lambda: encoder.wait_for_rotate_counter_clockwise(1))
rotate_ccw(a_pin, b_pin)
assert test_thread.result
assert not test_thread_cw.result
assert test_thread_ccw.result
|
|
# the entire process of aligning and calculating bias
# parameters
# - donor: fasta file of target
# - reference: fasta file of reference
# - fastq: fastq files
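# Example invocation (hypothetical script and file names; assumes the tool
# paths in config.py point at working bwa/mauve installs and that samtools,
# bedtools, datamash and pypy are on the PATH):
#   python pipeline.py --donor donor.fasta --reference reference.fasta --tmpdir /tmp reads.fq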
import argparse
import os
import random
import bio
import config
def run( cmd ):
'''
run a system command
'''
bio.log_stderr( cmd )
os.system( cmd )
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare BAMs')
parser.add_argument('--donor', help='donor fasta')
parser.add_argument('--reference', help='reference fasta')
parser.add_argument('--job', required=False, help='use to continue previous pipeline')
parser.add_argument('--start', required=False, help='start from this stage')
parser.add_argument('--tmpdir', required=False, help='where to write files')
parser.add_argument('fastq', help='fastq files to align')
args = parser.parse_args()
# now do each stage...
if args.job:
idx = int(args.job)
else:
idx = random.randint(1, 1e6)
if args.start:
start = int(args.start)
else:
start = 0
if args.tmpdir:
tmpdir = args.tmpdir
else:
tmpdir = '/tmp'
bio.log_stderr( 'Job ID: %i, Starting at stage %i' % (idx, start) )
# TODO error correction (ec)
# fasta indexes
stage = 1
if start <= stage:
run( '%s index %s' % ( config.BWA_PATH, args.donor ) )
run( '%s index %s' % ( config.BWA_PATH, args.reference ) )
bio.log_stderr( 'Stage %i: Indexing completed' % stage )
stage += 1 # 2
# alignment (aln)
if start <= stage:
#run( '%s mem -t 8 %s %s > %s/donor%i.sam' % ( config.BWA_PATH, args.donor, args.fastq, tmpdir, idx ) )
run( 'ln -s %s/donor.sam %s/donor%i.sam' % ( tmpdir, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Donor alignment completed' % stage )
stage += 1 # 3
if start <= stage:
run( '%s mem -t 8 %s %s > %s/reference%i.sam' % ( config.BWA_PATH, args.reference, args.fastq, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Reference alignment completed' % stage )
# genome alignment (mauve)
stage += 1 # 4
if start <= stage:
run( '%s --output=%s/mauve%i %s %s' % ( config.MAUVE_PATH, tmpdir, idx, args.donor, args.reference ) )
bio.log_stderr( 'Stage %i: Mauve completed' % stage )
# realignment
stage += 1 # 5
if start <= stage:
donor_accession = open( args.donor, 'r' ).readline().strip().split()[0][1:]
run( 'python remap_bam.py --xmfa %s/mauve%i --origin 2 --target 1 --output_not_covered %s/notcovered%i.sam --output_target_coverage %s/mauve_target%i.bed --output %s/remapped%i.sam %s/reference%i.sam --new_reference \'%s\' > %s/remap_bam%i.stats' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx, donor_accession, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Remap completed' % stage )
    # convert to bam
stage += 1 # 6
if start <= stage:
#run( 'samtools view -bS %s/donor%i.sam > %s/donor%i.bam' % ( tmpdir, idx, tmpdir, idx ) )
run( 'ln -s %s/donor.bam %s/donor%i.bam' % ( tmpdir, tmpdir, idx ) )
run( 'samtools view -bS %s/reference%i.sam > %s/reference%i.bam' % ( tmpdir, idx, tmpdir, idx ) )
# fix remapped
#with open( '%s/donor%i.sam' % ( tmpdir, idx ), 'r' ) as dfh:
with open( '%s/donor.sam' % ( tmpdir, ), 'r' ) as dfh:
l = (dfh.readline(), dfh.readline())
with open( '%s/remapped%i.head' % ( tmpdir, idx ), 'w' ) as rfh:
rfh.write( l[0] )
rfh.write( l[1] )
#run( 'cat %s/remapped%i.head %s/remapped%i.sam | samtools view -bS - > %s/remapped%i.bam' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
run( 'samtools view -bS %s/remapped%i.sam > %s/remapped%i.bam' % ( tmpdir, idx, tmpdir, idx ) )
with open( '%s/reference%i.sam' % ( tmpdir, idx ), 'r' ) as dfh:
l = (dfh.readline(), dfh.readline())
with open( '%s/notcovered%i.head' % ( tmpdir, idx ), 'w' ) as rfh:
rfh.write( l[0] )
rfh.write( l[1] )
run( 'cat %s/notcovered%i.head %s/notcovered%i.sam | samtools view -bS - > %s/notcovered%i.bam' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Convert to bam completed' % stage )
stage += 1 # 7
if start <= stage:
# reads in donor
run( 'samtools flagstat %s/donor%i.bam > %s/donorflag%i.txt' % ( tmpdir, idx, tmpdir, idx ) )
run( 'samtools flagstat %s/reference%i.bam > %s/referenceflag%i.txt' % ( tmpdir, idx, tmpdir, idx ) )
run( 'samtools flagstat %s/remapped%i.bam > %s/remappedflag%i.txt' % ( tmpdir, idx, tmpdir, idx ) )
run( 'samtools flagstat %s/notcovered%i.bam > %s/notcoveredflag%i.txt' % ( tmpdir, idx, tmpdir, idx ) )
# coverage
run( 'bedtools genomecov -ibam %s/donor%i.bam -bga | awk \'$4<1\' > %s/donornotcovered%i.bed' % ( tmpdir, idx, tmpdir, idx ) )
run( 'awk \'$4<1\' %s/donornotcovered%i.bed | awk \'{ print $3-$2; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/donor%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
run( 'bedtools genomecov -ibam %s/reference%i.bam -bga | awk \'$4<1\' | awk \'{ print $3-$2; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/reference%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
run( 'bedtools genomecov -ibam %s/remapped%i.bam -bga | awk \'$4<1\' | awk \'{ print $3-$2; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/remapped%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
run( 'bedtools genomecov -ibam %s/donor%i.bam -d | awk \'$3>0\' | awk \'{ print $3; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/donorsum%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
run( 'bedtools genomecov -ibam %s/remapped%i.bam -d | awk \'$3>0\' | awk \'{ print $3; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/remappedsum%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Coverage analysis completed' % stage )
stage += 1 # 8
if start <= stage:
run( 'pypy compare_bams.py --compare_position True --subset_detail True --mismatch_detail 1 --xmfa %s/mauve%i --origin 2 --target 1 %s/donor%i.bam %s/remapped%i.bam > %s/compare_bams%i.log' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
run( 'python extract_mismatched_reads.py --min_distance 50 %s/remapped%i.bam < %s/compare_bams%i.log > %s/mismatched%i.sam' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
run( 'python extract_mismatched_reads.py --min_distance 1 --max_distance 49 %s/remapped%i.bam < %s/compare_bams%i.log > %s/almost%i.sam' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
run( 'samtools view -bS %s/mismatched%i.sam > %s/mismatched%i.bam' % ( tmpdir, idx, tmpdir, idx ) )
run( 'samtools view -bS %s/almost%i.sam > %s/almost%i.bam' % ( tmpdir, idx, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Mismatch analysis completed' % stage )
stage += 1 # 9
if start <= stage:
run( 'samtools flagstat %s/mismatched%i.bam > %s/mismatchedflag%i.txt' % ( tmpdir, idx, tmpdir, idx ) )
run( 'samtools flagstat %s/almost%i.bam > %s/almostflag%i.txt' % ( tmpdir, idx, tmpdir, idx ) )
run( 'bedtools genomecov -ibam %s/mismatched%i.bam -d | awk \'$3>0\' | awk \'{ print $3; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/mismatched%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
run( 'bedtools genomecov -ibam %s/notcovered%i.bam -d | awk \'$3>0\' | awk \'{ print $3; }\' | datamash sum 1 min 1 max 1 mean 1 pstdev 1 count 1 > %s/notcovered%i.cov' % ( tmpdir, idx, tmpdir, idx ) )
run( "bedtools intersect -a %s/donornotcovered%i.bed -b %s/mauve_target%i.bed | awk '{t+=$3-$2;} END {print t;}' > %s/notcovered_overlap%i.cov" % ( tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Mismatch coverage completed' % stage )
stage += 1 # 10
if start <= stage:
reflen = int( open( '%s/notcovered%i.head' % ( tmpdir, idx ), 'r' ).readline().strip().split(':')[-1] )
donorlen = int( open( '%s/remapped%i.head' % ( tmpdir, idx ), 'r' ).readline().strip().split(':')[-1] )
print "===== Stats ====="
# reads
print "-- Reads --"
for line in open( '%s/donorflag%i.txt' % ( tmpdir, idx ) ):
fields = line.strip().split(' ')
if len(fields) > 4 and fields[4] == 'total':
print "Donor reads total: %s" % fields[0]
drt = int(fields[0])
if len(fields) > 3 and fields[3] == 'mapped':
print "Donor reads mapped: %s" % fields[0]
drm = int(fields[0])
print "Donor reads %% mapped: %.1f" % ( 100. * drm / max( 1, drt ) )
for line in open( '%s/referenceflag%i.txt' % ( tmpdir, idx ) ):
fields = line.strip().split()
if len(fields) > 4 and fields[4] == 'total':
print "Reference reads total: %s" % fields[0]
rrt = int(fields[0])
if len(fields) > 3 and fields[3] == 'mapped':
print "Reference reads mapped: %s" % fields[0]
rrm = int(fields[0])
print "Reference reads %% mapped: %.1f" % ( 100. * rrm / max( 1, rrt ) )
for line in open( '%s/remappedflag%i.txt' % ( tmpdir, idx ) ):
fields = line.strip().split()
if len(fields) > 4 and fields[4] == 'total':
print "Remapped reads total: %s" % fields[0]
mrt = int(fields[0])
if len(fields) > 3 and fields[3] == 'mapped':
mrm = int(fields[0])
print "Remapped reads mapped: %i (%.1f%%)" % (mrm, 100. * mrm / max( 1, rrm ) )
print "Remapped reads %% mapped: %.1f" % ( 100. * mrm / max( 1, rrm ) )
for line in open( '%s/mismatchedflag%i.txt' % ( tmpdir, idx ) ):
fields = line.strip().split()
if len(fields) > 4 and fields[4] == 'total':
print "Mismatched reads total: %s" % fields[0]
xrt = int(fields[0])
if len(fields) > 3 and fields[3] == 'mapped':
print "Mismatched reads mapped: %s" % fields[0]
xrm = int(fields[0])
print "Mismatched reads %% mapped: %.1f" % ( 100. * xrm / max(1, xrt) )
for line in open( '%s/notcoveredflag%i.txt' % ( tmpdir, idx ) ):
fields = line.strip().split()
if len(fields) > 4 and fields[4] == 'total':
nrt = int(fields[0])
print "Notcovered reads total: %i (%.1f%%)" % (nrt, 100. * nrt / max( 1, rrt ) )
if len(fields) > 3 and fields[3] == 'mapped':
nrm = int(fields[0])
print "Notcovered reads mapped: %i (%.1f%%)" % (nrm, 100. * nrm / max( 1, rrm ) )
print "Notcovered reads %% mapped: %.1f" % ( 100. * nrm / max( 1, nrt ) )
for line in open( '%s/almostflag%i.txt' % ( tmpdir, idx ) ):
fields = line.strip().split()
if len(fields) > 4 and fields[4] == 'total':
print "Almost correct reads total: %s" % fields[0]
art = int(fields[0])
if len(fields) > 3 and fields[3] == 'mapped':
print "Almost correct reads mapped: %s" % fields[0]
arm = int(fields[0])
print "Almost correct reads %% mapped: %.1f" % ( 100. * arm / max( 1, art ) )
print "\n-- Correctness --"
print "Mapped to correct location: %i (%.1f%%)" % ( mrm - xrm - arm, 100. * ( mrm - xrm - arm ) / max( 1, mrm ) )
print "Mapped correctly or within 50bp: %i (%.1f%%)" % ( mrm - xrm, 100. * ( mrm - xrm ) / max( 1, mrm ) )
print "Mapped incorrectly <50bp: %i (%.1f%%)" % ( arm, 100. * arm / max( 1, mrm ) )
print "Mapped incorrectly >50bp: %i (%.1f%%)" % ( xrm, 100. * xrm / max( 1, mrm ) )
# coverage
print "\n-- Coverage --"
df = open( '%s/donor%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
donor_not_covered = int(df[0])
not_covered_overlap = int( open( '%s/notcovered_overlap%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip() )
print "Donor not covered: %i (%.2f%%)" % ( donor_not_covered, 100. * donor_not_covered / max( 1, donorlen ) )
print "Donor not covered with mauve target: %i (%.2f%%)" % ( not_covered_overlap, 100. * not_covered_overlap / max( 1, donor_not_covered ) )
print "Donor covered: %i (%.2f%%)" % ( donorlen - int(df[0]), 100. * (donorlen - int(df[0]) ) / max( 1, donorlen ) )
print "Donor gaps: %s" % df[5]
print "Donor max gap: %s" % df[2]
dfs = open( '%s/donorsum%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
print "Donor mean coverage: %s" % dfs[3]
print "Donor max coverage: %s" % dfs[2]
rf = open( '%s/reference%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
print "Reference not covered: %s (%.1f%%)" % ( rf[0], 100. * int(rf[0]) / max( 1, reflen ) )
print "Reference covered: %i (%.1f%%)" % ( reflen - int(rf[0]), 100. * (reflen - int(rf[0])) / max( 1, reflen ) )
print "Reference gaps: %s" % rf[5]
print "Reference max gap: %s" % rf[2]
mf = open( '%s/remapped%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
print "Remapped not covered: %s (%.1f%%)" % (mf[0], 100. * int(mf[0]) / max( 1, donorlen ) )
print "Remapped covered: %i (%.1f%%)" % (donorlen - int(mf[0]), 100. * (donorlen - int(mf[0]) ) / max( 1, donorlen ) )
print "Remapped gaps: %s" % mf[5]
print "Remapped max gap: %s" % mf[2]
mfs = open( '%s/remappedsum%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
print "Remapped mean coverage: %s" % mfs[3]
print "Remapped max coverage: %s" % mfs[2]
print "\n-- Remapped incorrectly > 50bp --"
xf = open( '%s/mismatched%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
        if len(xf) < 6:
xf = [ '0', '0', '0', '0', '0', '0' ]
print "Bases affected by mismatch: %s" % xf[5]
print "Max mismatch coverage: %s" % xf[2]
print "\n-- Off target (outside mappable region) --"
nf = open( '%s/notcovered%i.cov' % ( tmpdir, idx ), 'r' ).readline().strip().split()
        if len(nf) < 6:
nf = [ '0', '0', '0', '0', '0', '0' ]
print "Off target bases: %s" % nf[5]
print "Max coverage of off target: %s" % nf[2]
print "\n-- Remapping --"
remapping_stats = {}
for line in open( '%s/remap_bam%i.stats' % ( tmpdir, idx ), 'r' ):
fields = line.strip().split(':')
if len(fields) >1:
remapping_stats[fields[0].strip()] = int(fields[1])
print "Mapped bases: %i (%.1f%%)" % (remapping_stats['count'], 100. * remapping_stats['count'] / max( 1, donorlen ) )
print "Not mapped bases: %i (%.1f%%)" % (donorlen - remapping_stats['count'], 100. * ( donorlen - remapping_stats['count'] )/ donorlen )
print "Mapped blocks: %s" % remapping_stats['blocks']
print "Covered reads: %i (%.1f)" % ( remapping_stats['reads_covered'], 100. * remapping_stats['reads_covered'] / max( 1, remapping_stats['mapped'] ) )
print "Covered partial reads: %i (%.1f)" % ( remapping_stats['reads_covered'] + remapping_stats['reads_partial'], 100. * ( remapping_stats['reads_covered'] + remapping_stats['reads_partial'] )/ remapping_stats['mapped'] )
print "Not mapped reads: %i (%.1f)" % ( remapping_stats['reads_notcovered'] + remapping_stats['reads_partial'], 100. * ( remapping_stats['reads_notcovered'] + remapping_stats['reads_partial'] ) / max( 1, remapping_stats['mapped'] ) )
print "\n-- Summary --"
coverage_loss = donorlen - ( reflen - int(rf[0]) )
print "Donor not covered by direct alignment: %i (%.2f%%)" % ( donor_not_covered, 100. * donor_not_covered / max( 1, donorlen ) )
print 'Best case loss from reference coverage: %i / %i: %.1f%%' % ( coverage_loss, donorlen, 100. * coverage_loss / max( 1, donorlen ) )
print 'Best case loss from remapping: %i / %i: %.1f%%' % ( donorlen - remapping_stats['count'], donorlen, 100. * ( donorlen - remapping_stats['count'] ) / max( 1, donorlen ) )
print 'Loss after remap coverage: %i / %i: %.1f%%' % ( int(mf[0]), donorlen, 100. * int(mf[0]) / max( 1, donorlen ) )
print 'Loss due to remap: %i / %i: %.1f%%' % ( int(mf[0]) - coverage_loss, donorlen, 100. * ( int(mf[0]) - coverage_loss ) / max( 1, donorlen ) )
print 'Potential mismatch impact: %i / %i: %.1f%%' % ( int(xf[5]), donorlen, 100. * int(xf[5]) / max( 1, donorlen ) )
print 'Off target: %i / %i: %.1f%%' % ( int(nf[5]), donorlen, 100. * int(nf[5]) / max( 1, donorlen ) )
print "Donor not covered with mauve target: %i (%.2f%%)" % ( not_covered_overlap, 100. * not_covered_overlap / max( 1, donor_not_covered ) )
print 'ESTIMATED BIAS: %.1f -> %.1f -> %.1f' % ( 100. * ( int(mf[0]) - int(nf[5]) - not_covered_overlap ) / donorlen , 100. * ( int(mf[0]) - not_covered_overlap ) / donorlen, 100. * ( int(mf[0]) + int(xf[5]) ) / donorlen )
print "===== "
bio.log_stderr( 'Stage %i: Finished' % stage )
stage += 1 # 11
#if start == stage:
if start <= stage:
run( 'rm %s/almost%i.sam %s/reference%i.sam %s/mismatched%i.sam %s/remapped%i.sam %s/notcovered%i.sam %s/donor%i.sam %s/donor%i.bam' % ( tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx, tmpdir, idx ) )
bio.log_stderr( 'Stage %i: Cleanup finished' % stage )
|
|
try:
import json
except ImportError:
import simplejson as json
import logging
import time
import requests
from flask import request, render_template, redirect, url_for, flash, session
from pegasus.metrics import app, db, loader, forms
log = logging.getLogger(__name__)
MAX_CONTENT_LENGTH = 16*1024
@app.before_request
def before_request():
db.connect()
@app.teardown_request
def teardown_request(exception):
if exception is not None:
db.rollback()
db.close()
@app.context_processor
def inject_date():
return dict(current_date=time.time())
@app.route('/')
def index():
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
start = form.get_start()
end = form.get_end()
raw = db.count_raw_data(start, end)
invalid = db.count_invalid_data(start, end)
errors = db.count_planner_errors(start, end)
planner_stats = db.get_planner_stats(start, end)
dagman_stats = db.get_dagman_stats(start, end)
downloads = db.count_downloads(start, end)
table_args = {
"limit" : 5,
"offset" : 0,
"start_time" : start,
"end_time" : end
}
# These count variables are just dummies to get the top_hosts and top_domains
totalCount, filterCount, top_hosts = db.get_top_hosts(**table_args)
totalCount, filterCount, top_domains = db.get_top_domains(**table_args)
return render_template('index.html',
raw=raw,
invalid=invalid,
planner_errors=errors,
planner_stats=planner_stats,
dagman_stats=dagman_stats,
top_hosts=top_hosts,
top_domains=top_domains,
downloads=downloads,
form=form)
@app.route('/reprocess', methods=["POST"])
def reprocess():
i = loader.reprocess_raw_data()
db.commit()
flash("Reprocessed %d objects successfully" % i)
return redirect(request.referrer or url_for('index'))
@app.route('/invalid')
def invalid():
objects = db.get_invalid_data()
for obj in objects:
data = obj["data"]
data = json.loads(data)
obj["data"] = json.dumps(data, indent=4)
return render_template('invalid.html',
objects=objects)
@app.route('/planner/recenterrors')
def recent_errors():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, errors = db.get_recent_errors(**table_args)
return render_template('recent_errors.json', table_args=table_args, count=totalCount, filtered=filteredCount, errors=errors)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('recent_errors.html', form=form)
@app.route('/planner/toperrors')
def top_errors():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, errors = db.get_top_errors(**table_args)
return render_template('top_errors.json', table_args=table_args, count=totalCount, filtered=filteredCount, errors=errors)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('top_errors.html', form=form)
@app.route('/planner/topdomains')
def top_domains():
if request.is_xhr :
table_args = __get_datatables_args()
totalCount, filteredCount, domains = db.get_top_domains(**table_args)
return render_template('top_domains.json', table_args=table_args, count=totalCount, filtered=filteredCount, domains=domains)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('top_domains.html', form=form)
@app.route('/planner/tophosts')
def top_hosts():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, hosts = db.get_top_hosts(**table_args)
return render_template('top_hosts.json', table_args=table_args, count=totalCount, filtered=filteredCount, hosts=hosts)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('top_hosts.html', form=form)
@app.route('/planner/errorsbyhash/<errhash>')
def error_hash(errhash):
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, errors = db.get_errors_by_hash(**table_args)
return render_template('error_list.json', table_args=table_args, count=totalCount, filtered=filteredCount, errors=errors)
return render_template('error_list.html', err_hash=errhash)
@app.route('/planner/metrics/<objid>')
def planner_metric(objid):
obj = db.get_metrics_and_error(objid)
runs = db.get_runs_for_workflow(obj['root_wf_uuid'])
return render_template('planner_metric.html',
obj=obj,
runs=runs)
@app.route('/planner/recentapplications')
def recent_applications():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, applications = db.get_recent_applications(**table_args)
return render_template('recent_applications.json', table_args=table_args, count=totalCount, filtered=filteredCount, applications=applications)
form = forms.PeriodForm(request.args)
form.validate()
return render_template('recent_applications.html', form=form)
@app.route('/planner/topapplications')
def top_applications():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, applications = db.get_top_applications(**table_args)
return render_template('top_applications.json', table_args=table_args, count=totalCount, filtered=filteredCount, applications=applications)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('top_applications.html', form=form)
@app.route('/planner/map')
def map_metrics():
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.MapForm(formdata=session['formdata'])
form.validate()
start = form.get_start()
end = form.get_end()
pins = form.get_pins()
locations = db.get_locations(pins, start, end)
return render_template('maps.html',
form =form,
locations=locations)
@app.route('/planner/trends')
def planner_trends():
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.TrendForm(formdata=session['formdata'])
form.validate()
intervals = form.get_monthly_intervals()
trend = []
for i in range(len(intervals)-1):
newPlans = db.get_metrics_by_version(intervals[i+1], intervals[i])
trend.append(newPlans)
return render_template('planner_trends.html',
form=form,
intervals=intervals,
trend=trend)
@app.route('/planner/histograms')
def histograms():
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.HistogramForm(formdata=session['formdata'])
form.validate()
field = form.get_metric()
start = form.get_start()
end = form.get_end()
intervals = form.get_intervals()
data = []
for i in range(1, len(intervals)):
data.append(db.get_workflow_count_by_field(field, intervals[i-1], intervals[i], start, end))
return render_template('histograms.html',
form=form,
trend=data,
intervals=intervals)
@app.route('/locations/<ipaddr>')
def location_metric(ipaddr):
location = db.get_location(ipaddr)
return render_template('location.html', location=location)
@app.route('/runs/topapplications')
def top_application_runs():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, applications = db.get_top_application_runs(**table_args)
return render_template('top_application_runs.json', table_args=table_args, count=totalCount, filtered=filteredCount, applications=applications)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('top_application_runs.html', form=form)
@app.route('/downloads/recent')
def recent_downloads():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, downloads = db.get_recent_downloads(**table_args)
return render_template('recent_downloads.json', table_args=table_args, count=totalCount, filtered=filteredCount, downloads=downloads)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
form.validate()
return render_template('recent_downloads.html', form=form)
@app.route('/downloads/popular')
def popular_downloads():
if request.is_xhr:
table_args = __get_datatables_args()
totalCount, filteredCount, downloads = db.get_popular_downloads(**table_args)
return render_template('popular_downloads.json', table_args=table_args, count=totalCount, filtered=filteredCount, downloads=downloads)
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.PeriodForm(formdata=session['formdata'])
return render_template('popular_downloads.html', form=form)
@app.route('/downloads/trends')
def download_trends():
if request.args or 'formdata' not in session:
session['formdata'] = request.args
form = forms.TrendForm(formdata=session['formdata'])
form.validate()
intervals = form.get_monthly_intervals()
trend = []
for i in range(len(intervals)-1):
newDownloads = db.get_downloads_by_version(intervals[i+1], intervals[i])
trend.append(newDownloads)
return render_template('download_trends.html',
form=form,
intervals=intervals,
trend=trend)
@app.route('/downloads/metrics/<objid>')
def download_metric(objid):
obj = db.get_download(objid)
return render_template('download_metric.html', obj=obj)
@app.route('/status')
def status():
# Make sure the database is reachable and that
# it received some data in the last 24 hours
now = time.time()
then = now - (24*60*60)
count = db.count_raw_data(then, now)
if count == 0:
return "No data in last 24 hours", 503
return "OK", 200
@app.route('/metrics', methods=["POST"])
def store_metrics():
# Check the content-type
try:
type_header = request.headers["Content-Type"]
if type_header.lower() != "application/json":
log.error("Invalid Content-Type")
return "Invalid Content-Type", 400
except:
return "Invalid Content-Type", 400
# Check the length
try:
length = int(request.headers["Content-Length"])
if length > MAX_CONTENT_LENGTH:
return "Request too large", 400
except:
return "Invalid Content-Length", 400
# Read and parse the data
try:
data = request.json
except Exception, e:
log.error("Error parsing JSON object: %s", e)
return "Error parsing JSON object", 400
# TODO Validate required fields
if "type" not in data:
return "type missing", 400
if "client" not in data:
return "client missing", 400
if "version" not in data:
return "version missing", 400
# Record the time that the data was received
# The old downloads will have a timestamp already, so
# don't add one if the key exists
if "ts" not in data:
data["ts"] = time.time()
ts = data["ts"]
# Get the remote IP address. The downloads will have
# a remote_addr already, so don't add it if the key
# exists
if "remote_addr" not in data:
data["remote_addr"] = request.environ["REMOTE_ADDR"]
remote_addr = data["remote_addr"]
# Store the raw data
try:
data["id"] = db.store_raw_data(ts, remote_addr, data)
db.commit()
except Exception, e:
log.error("Error storing JSON data: %s", e)
db.rollback()
return "Error storing JSON data", 500
# Store the processed data
try:
loader.process_raw_data(data)
db.commit()
except Exception, e:
log.error("Error processing JSON data: %s", e)
db.rollback()
return "", 202
def __get_datatables_args():
'''
Extract list of arguments passed in the request
'''
table_args = dict()
if request.args.get('sEcho'):
table_args['sequence'] = request.args.get('sEcho')
if request.args.get('iColumns'):
table_args['column-count'] = int(request.args.get('iColumns'))
if request.args.get('sColumns'):
table_args['columns'] = request.args.get('sColumns')
if request.args.get('iDisplayStart'):
table_args['offset'] = int(request.args.get('iDisplayStart'))
if request.args.get('iDisplayLength'):
table_args['limit'] = int(request.args.get('iDisplayLength'))
if request.args.get('sSearch'):
table_args['filter'] = request.args.get('sSearch')
if request.args.get('bRegex'):
table_args['filter-regex'] = request.args.get('bRegex')
if request.args.get('iSortingCols'):
table_args['sort-col-count'] = int(request.args.get('iSortingCols'))
if request.args.get('start_time'):
table_args['start_time'] = request.args.get('start_time')
if request.args.get('end_time'):
table_args['end_time'] = request.args.get('end_time')
if request.args.get('form_only'):
table_args['form_only'] = request.args.get('form_only')
if request.args.get('errhash'):
table_args['errhash'] = request.args.get('errhash')
if request.args.get('iColumns'):
for i in range(int(request.args.get('iColumns'))):
i = str(i)
if request.args.get('mDataProp_' + i):
table_args['mDataProp_' + i] = request.args.get('mDataProp_' + i)
if request.args.get('sSearch_' + i):
table_args['sSearch_' + i] = request.args.get('sSearch_' + i)
if request.args.get('bRegex_' + i):
table_args['bRegex_' + i] = request.args.get('bRegex_' + i)
if request.args.get('bSearchable_' + i):
table_args['bSearchable_' + i] = request.args.get('bSearchable_' + i)
if request.args.get('iSortCol_' + i):
table_args['iSortCol_' + i] = int(request.args.get('iSortCol_' + i))
if request.args.get('bSortable_' + i):
table_args['bSortable_' + i] = request.args.get('bSortable_' + i)
if request.args.get('sSortDir_' + i):
table_args['sSortDir_' + i] = request.args.get('sSortDir_' + i)
return table_args
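# Illustrative example (query-string values are hypothetical): a request such
# as ?sEcho=1&iDisplayStart=0&iDisplayLength=10&sSearch=foo yields
# {'sequence': '1', 'offset': 0, 'limit': 10, 'filter': 'foo'}.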
|
|
#!/usr/bin/env python
# Copyright (c) 2016 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates various info tables from SPIR-V JSON grammar."""
from __future__ import print_function
import errno
import functools
import json
import os.path
import re
# Prefix for all C variables generated by this script.
PYGEN_VARIABLE_PREFIX = 'pygen_variable'
def make_path_to_file(f):
"""Makes all ancestor directories to the given file, if they
don't yet exist.
Arguments:
f: The file whose ancestor directories are to be created.
"""
dir = os.path.dirname(os.path.abspath(f))
try:
os.makedirs(dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dir):
pass
else:
raise
def compose_capability_list(caps):
"""Returns a string containing a braced list of capabilities as enums.
Arguments:
- caps: a sequence of capability names
Returns:
a string containing the braced list of SpvCapability* enums named by caps.
"""
return "{" + ", ".join(['SpvCapability{}'.format(c) for c in caps]) + "}"
def compose_extension_list(exts):
"""Returns a string containing a braced list of extensions as enums.
Arguments:
- exts: a sequence of extension names
Returns:
    a string containing the braced list of libspirv::Extension enums named by exts.
"""
return "{" + ", ".join(['libspirv::Extension::k{}'.format(e) for e in exts]) + "}"
def convert_operand_kind(operand_tuple):
"""Returns the corresponding operand type used in spirv-tools for
the given operand kind and quantifier used in the JSON grammar.
Arguments:
- operand_tuple: a tuple of two elements:
- operand kind: used in the JSON grammar
- quantifier: '', '?', or '*'
Returns:
a string of the enumerant name in spv_operand_type_t
"""
kind, quantifier = operand_tuple
# The following cases are where we differ between the JSON grammar and
# spirv-tools.
if kind == 'IdResultType':
kind = 'TypeId'
elif kind == 'IdResult':
kind = 'ResultId'
elif kind == 'IdMemorySemantics' or kind == 'MemorySemantics':
kind = 'MemorySemanticsId'
elif kind == 'IdScope' or kind == 'Scope':
kind = 'ScopeId'
elif kind == 'IdRef':
kind = 'Id'
elif kind == 'ImageOperands':
kind = 'Image'
elif kind == 'Dim':
kind = 'Dimensionality'
elif kind == 'ImageFormat':
kind = 'SamplerImageFormat'
elif kind == 'KernelEnqueueFlags':
kind = 'KernelEnqFlags'
elif kind == 'LiteralExtInstInteger':
kind = 'ExtensionInstructionNumber'
elif kind == 'LiteralSpecConstantOpInteger':
kind = 'SpecConstantOpNumber'
elif kind == 'LiteralContextDependentNumber':
kind = 'TypedLiteralNumber'
elif kind == 'PairLiteralIntegerIdRef':
kind = 'LiteralIntegerId'
elif kind == 'PairIdRefLiteralInteger':
kind = 'IdLiteralInteger'
elif kind == 'PairIdRefIdRef': # Used by OpPhi in the grammar
kind = 'Id'
if kind == 'FPRoundingMode':
kind = 'FpRoundingMode'
elif kind == 'FPFastMathMode':
kind = 'FpFastMathMode'
if quantifier == '?':
kind = 'Optional{}'.format(kind)
elif quantifier == '*':
kind = 'Variable{}'.format(kind)
return 'SPV_OPERAND_TYPE_{}'.format(
re.sub(r'([a-z])([A-Z])', r'\1_\2', kind).upper())
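# A few worked examples of the mapping above (values follow from the
# substitutions and the final regex):
#   convert_operand_kind(('IdResultType', ''))   == 'SPV_OPERAND_TYPE_TYPE_ID'
#   convert_operand_kind(('IdRef', '*'))         == 'SPV_OPERAND_TYPE_VARIABLE_ID'
#   convert_operand_kind(('ImageOperands', '?')) == 'SPV_OPERAND_TYPE_OPTIONAL_IMAGE'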
class InstInitializer(object):
"""Instances holds a SPIR-V instruction suitable for printing as
the initializer for spv_opcode_desc_t."""
def __init__(self, opname, caps, operands):
"""Initialization.
Arguments:
- opname: opcode name (with the 'Op' prefix)
- caps: a sequence of capability names required by this opcode
- operands: a sequence of (operand-kind, operand-quantifier) tuples
"""
assert opname.startswith('Op')
self.opname = opname[2:] # Remove the "Op" prefix.
self.caps_mask = compose_capability_list(caps)
self.operands = [convert_operand_kind(o) for o in operands]
self.fix_syntax()
operands = [o[0] for o in operands]
self.ref_type_id = 'IdResultType' in operands
self.def_result_id = 'IdResult' in operands
def fix_syntax(self):
"""Fix an instruction's syntax, adjusting for differences between
the officially released grammar and how SPIRV-Tools uses the grammar.
Fixes:
- ExtInst should not end with SPV_OPERAND_VARIABLE_ID.
https://github.com/KhronosGroup/SPIRV-Tools/issues/233
"""
if (self.opname == 'ExtInst'
and self.operands[-1] == 'SPV_OPERAND_TYPE_VARIABLE_ID'):
self.operands.pop()
def __str__(self):
template = ['{{"{opname}"', 'SpvOp{opname}', '{caps_mask}',
'{num_operands}', '{{{operands}}}',
'{def_result_id}', '{ref_type_id}}}']
return ', '.join(template).format(
opname=self.opname,
caps_mask=self.caps_mask,
num_operands=len(self.operands),
operands=', '.join(self.operands),
def_result_id=(1 if self.def_result_id else 0),
ref_type_id=(1 if self.ref_type_id else 0))
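# As a sketch, an instruction whose grammar entry lists no capabilities and the
# operands (IdResultType, IdResult) -- e.g. OpUndef in the core grammar -- would
# print as the initializer:
# {"Undef", SpvOpUndef, {}, 2, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID}, 1, 1}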
class ExtInstInitializer(object):
"""Instances holds a SPIR-V extended instruction suitable for printing as
the initializer for spv_ext_inst_desc_t."""
def __init__(self, opname, opcode, caps, operands):
"""Initialization.
Arguments:
- opname: opcode name
- opcode: enumerant value for this opcode
- caps: a sequence of capability names required by this opcode
- operands: a sequence of (operand-kind, operand-quantifier) tuples
"""
self.opname = opname
self.opcode = opcode
self.caps_mask = compose_capability_list(caps)
self.operands = [convert_operand_kind(o) for o in operands]
self.operands.append('SPV_OPERAND_TYPE_NONE')
def __str__(self):
template = ['{{"{opname}"', '{opcode}', '{caps_mask}',
'{{{operands}}}}}']
return ', '.join(template).format(
opname=self.opname,
opcode=self.opcode,
caps_mask=self.caps_mask,
operands=', '.join(self.operands))
def generate_instruction(inst, is_ext_inst):
"""Returns the C initializer for the given SPIR-V instruction.
Arguments:
- inst: a dict containing information about a SPIR-V instruction
- is_ext_inst: a bool indicating whether |inst| is an extended
instruction.
Returns:
a string containing the C initializer for spv_opcode_desc_t or
spv_ext_inst_desc_t
"""
opname = inst.get('opname')
opcode = inst.get('opcode')
caps = inst.get('capabilities', [])
operands = inst.get('operands', [])
operands = [(o['kind'], o.get('quantifier', '')) for o in operands]
assert opname is not None
if is_ext_inst:
return str(ExtInstInitializer(opname, opcode, caps, operands))
else:
return str(InstInitializer(opname, caps, operands))
def generate_instruction_table(inst_table, is_ext_inst):
"""Returns the info table containing all SPIR-V instructions.
Arguments:
- inst_table: a dict containing all SPIR-V instructions.
- is_ext_inst: a bool indicating whether |inst_table| is for
an extended instruction set.
"""
return ',\n'.join([generate_instruction(inst, is_ext_inst)
for inst in inst_table])
class EnumerantInitializer(object):
"""Prints an enumerant as the initializer for spv_operand_desc_t."""
def __init__(self, enumerant, value, caps, exts, parameters):
"""Initialization.
Arguments:
- enumerant: enumerant name
- value: enumerant value
- caps: a sequence of capability names required by this enumerant
- exts: a sequence of names of extensions enabling this enumerant
- parameters: a sequence of (operand-kind, operand-quantifier) tuples
"""
self.enumerant = enumerant
self.value = value
self.caps = compose_capability_list(caps)
self.exts = compose_extension_list(exts)
self.parameters = [convert_operand_kind(p) for p in parameters]
def __str__(self):
template = ['{{"{enumerant}"', '{value}',
'{caps}', '{exts}', '{{{parameters}}}}}']
return ', '.join(template).format(
enumerant=self.enumerant,
value=self.value,
caps=self.caps,
exts=self.exts,
parameters=', '.join(self.parameters))
def generate_enum_operand_kind_entry(entry):
"""Returns the C initializer for the given operand enum entry.
Arguments:
- entry: a dict containing information about an enum entry
Returns:
a string containing the C initializer for spv_operand_desc_t
"""
enumerant = entry.get('enumerant')
value = entry.get('value')
caps = entry.get('capabilities', [])
exts = entry.get('exts', [])
params = entry.get('parameters', [])
params = [p.get('kind') for p in params]
params = zip(params, [''] * len(params))
assert enumerant is not None
assert value is not None
return str(EnumerantInitializer(enumerant, value, caps, exts, params))
def generate_enum_operand_kind(enum):
"""Returns the C definition for the given operand kind."""
kind = enum.get('kind')
assert kind is not None
name = '{}_{}Entries'.format(PYGEN_VARIABLE_PREFIX, kind)
entries = [' {}'.format(generate_enum_operand_kind_entry(e))
for e in enum.get('enumerants', [])]
template = ['static const spv_operand_desc_t {name}[] = {{',
'{entries}', '}};']
entries = '\n'.join(template).format(
name=name,
entries=',\n'.join(entries))
return kind, name, entries
def generate_operand_kind_table(enums):
"""Returns the info table containing all SPIR-V operand kinds."""
# We only need to output info tables for those operand kinds that are enums.
enums = [generate_enum_operand_kind(e)
for e in enums
if e.get('category') in ['ValueEnum', 'BitEnum']]
# We have three operand kinds that require their optional counterpart to
# exist in the operand info table.
three_optional_enums = ['ImageOperands', 'AccessQualifier', 'MemoryAccess']
three_optional_enums = [e for e in enums if e[0] in three_optional_enums]
enums.extend(three_optional_enums)
enum_kinds, enum_names, enum_entries = zip(*enums)
# Mark the last three as optional ones.
enum_quantifiers = [''] * (len(enums) - 3) + ['?'] * 3
# Drop the duplicated definitions so their entry arrays are not redefined.
enum_entries = enum_entries[:-3]
enum_kinds = [convert_operand_kind(e)
for e in zip(enum_kinds, enum_quantifiers)]
table_entries = zip(enum_kinds, enum_names, enum_names)
table_entries = [' {{{}, ARRAY_SIZE({}), {}}}'.format(*e)
for e in table_entries]
template = [
'static const spv_operand_desc_group_t {p}_OperandInfoTable[] = {{',
'{enums}', '}};']
table = '\n'.join(template).format(
p=PYGEN_VARIABLE_PREFIX, enums=',\n'.join(table_entries))
return '\n\n'.join(enum_entries + (table,))
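# For orientation, each row of the generated group table has the form below
# (the operand kind shown is an assumed example of a ValueEnum in the grammar):
# {SPV_OPERAND_TYPE_SOURCE_LANGUAGE,
#  ARRAY_SIZE(pygen_variable_SourceLanguageEntries),
#  pygen_variable_SourceLanguageEntries}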
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate SPIR-V info tables')
parser.add_argument('--spirv-core-grammar', metavar='<path>',
type=str, required=True,
help='input JSON grammar file for core SPIR-V '
'instructions')
parser.add_argument('--extinst-glsl-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for GLSL extended '
'instruction set')
parser.add_argument('--extinst-opencl-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for OpenCL extended '
'instruction set')
parser.add_argument('--core-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for core SPIR-V instructions')
parser.add_argument('--glsl-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for GLSL extended instruction set')
parser.add_argument('--opencl-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for OpenCL extended instruction set')
parser.add_argument('--operand-kinds-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for operand kinds')
args = parser.parse_args()
if (args.core_insts_output is None) != \
(args.operand_kinds_output is None):
print('error: --core-insts-output and --operand-kinds-output '
'should be specified together.')
exit(1)
if (args.glsl_insts_output is None) != \
(args.extinst_glsl_grammar is None):
print('error: --glsl-insts-output and --extinst-glsl-grammar '
'should be specified together.')
exit(1)
if (args.opencl_insts_output is None) != \
(args.extinst_opencl_grammar is None):
print('error: --opencl-insts-output and --extinst-opencl-grammar '
'should be specified together.')
exit(1)
if all([args.core_insts_output is None,
args.glsl_insts_output is None,
args.opencl_insts_output is None]):
print('error: at least one output should be specified.')
exit(1)
with open(args.spirv_core_grammar) as json_file:
grammar = json.loads(json_file.read())
if args.core_insts_output is not None:
make_path_to_file(args.core_insts_output)
make_path_to_file(args.operand_kinds_output)
print(generate_instruction_table(grammar['instructions'], False),
file=open(args.core_insts_output, 'w'))
print(generate_operand_kind_table(grammar['operand_kinds']),
file=open(args.operand_kinds_output, 'w'))
if args.extinst_glsl_grammar is not None:
with open(args.extinst_glsl_grammar) as json_file:
grammar = json.loads(json_file.read())
make_path_to_file(args.glsl_insts_output)
print(generate_instruction_table(grammar['instructions'], True),
file=open(args.glsl_insts_output, 'w'))
if args.extinst_opencl_grammar is not None:
with open(args.extinst_opencl_grammar) as json_file:
grammar = json.loads(json_file.read())
make_path_to_file(args.opencl_insts_output)
print(generate_instruction_table(grammar['instructions'], True),
file=open(args.opencl_insts_output, 'w'))
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""client module tests."""
import logging
import sys
import mox
import stubout
from google.apputils import app
from google.apputils import basetest
from simian.client import client
if hasattr(mox.MockAnything, '__str__'): del(mox.MockAnything.__str__)
logging.basicConfig(filename='/dev/null')
class GenericException(Exception):
"""A generic exception that can be used for mocks."""
pass
class ClientModuleTest(mox.MoxTestBase):
"""Test the client module."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testConstants(self):
for a in [
'SERVER_HOSTNAME', 'SERVER_PORT', 'AUTH_DOMAIN',
'CLIENT_SSL_PATH', 'SEEK_SET', 'SEEK_CUR', 'SEEK_END',
'DEBUG', 'URL_UPLOADPKG']:
self.assertTrue(hasattr(client, a))
class MultiBodyConnectionTest(mox.MoxTestBase):
"""Test MultiBodyConnection class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.mbc = client.MultiBodyConnection()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testSetProgressCallback(self):
"""Test SetProgressCallback()."""
fn = lambda x: 1
self.mox.ReplayAll()
self.assertFalse(hasattr(self.mbc, '_progress_callback'))
self.mbc.SetProgressCallback(fn)
self.assertEqual(self.mbc._progress_callback, fn)
self.assertRaises(
client.Error,
self.mbc.SetProgressCallback, 1)
self.mox.VerifyAll()
def testProgressCallback(self):
"""Test _ProgressCallback()."""
callback = self.mox.CreateMockAnything()
callback(1, 2).AndReturn(None)
self.mox.ReplayAll()
self.mbc._ProgressCallback(1, 2)
self.mbc._progress_callback = callback
self.mbc._ProgressCallback(1, 2)
self.mox.VerifyAll()
def testRequest(self):
"""Test request()."""
f_body = 'x' * 10000
f = self.mox.CreateMockAnything()
method = 'GET'
url = '/foo'
body = ['hello', f]
content_length = len(body[0]) + len(f_body)
headers = {
'Content-Length': content_length,
}
self.mbc._is_https = False
mock_request = self.mox.CreateMockAnything()
self.stubs.Set(client.httplib.HTTPConnection, 'request', mock_request)
self.mbc.send = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.mbc, '_ProgressCallback')
f.tell().AndReturn(0)
f.seek(0, client.SEEK_END).AndReturn(None)
f.tell().AndReturn(len(f_body))
f.seek(0, client.SEEK_SET).AndReturn(None)
mock_request(
self.mbc,
method, url, headers=headers).AndReturn(None)
self.mbc._ProgressCallback(0, content_length)
self.mbc.send(body[0]).AndReturn(None)
self.mbc._ProgressCallback(len(body[0]), content_length).AndReturn(None)
f.read(8192).AndReturn(f_body[:8192])
self.mbc.send(f_body[:8192]).AndReturn(None)
self.mbc._ProgressCallback(
len(body[0]) + 8192, content_length).AndReturn(None)
f.read(8192).AndReturn(f_body[8192:])
self.mbc.send(f_body[8192:]).AndReturn(None)
self.mbc._ProgressCallback(
len(body[0]) + len(f_body), content_length).AndReturn(None)
f.read(8192).AndReturn('')
self.mbc._ProgressCallback(
len(body[0]) + len(f_body), content_length).AndReturn(None)
self.mox.ReplayAll()
self.mbc.request(method, url, body=body)
self.mox.VerifyAll()
class HTTPSMultiBodyConnectionTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.hostname = 'foohost'
self.mbc = client.HTTPSMultiBodyConnection(self.hostname)
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testParentClassRequestAssumption(self):
"""Test assumptions of parent class request()."""
method = 'GET'
url = '/foo'
body = None
headers = {}
mock_fn = self.mox.CreateMockAnything()
self.stubs.Set(
client.httplib.HTTPConnection, '_send_request', mock_fn)
mock_fn(method, url, body, headers).AndReturn(-1)
self.mox.ReplayAll()
c = client.httplib.HTTPConnection(self.hostname)
self.assertEqual(None, c.request(method, url))
self.mox.VerifyAll()
def testParentClassSendRequestAssumption(self):
"""Test assumptions of parent class _send_request()."""
method = 'GET'
url = '/foo'
body1 = None
body2 = 'howdy'
headers = {'foo': 'bar'}
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'putrequest')
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'putheader')
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'endheaders')
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'send')
# With a None body supplied, send() is never called. On Python >= 2.7,
# endheaders() is still called with the body contents, even if the body
# is None.
client.httplib.HTTPConnection.putrequest(method, url).AndReturn(None)
client.httplib.HTTPConnection.putheader(
'foo', headers['foo']).AndReturn(None)
if sys.version_info >= (2, 7):
client.httplib.HTTPConnection.endheaders(body1).AndReturn(None)
else:
client.httplib.HTTPConnection.endheaders().AndReturn(None)
# With a body supplied, send() is called inside _send_request() on
# Python versions before 2.7; on >= 2.7 endheaders() sends the body and
# headers all at once.
client.httplib.HTTPConnection.putrequest(method, url).AndReturn(None)
client.httplib.HTTPConnection.putheader(
'Content-Length', str(len(body2)))
client.httplib.HTTPConnection.putheader(
'foo', headers['foo']).AndReturn(None)
if sys.version_info >= (2, 7):
client.httplib.HTTPConnection.endheaders(body2).AndReturn(None)
else:
client.httplib.HTTPConnection.endheaders().AndReturn(None)
client.httplib.HTTPConnection.send(body2).AndReturn(None)
self.mox.ReplayAll()
c = client.httplib.HTTPConnection(self.hostname)
c._send_request(method, url, body1, headers)
c._send_request(method, url, body2, headers)
self.mox.VerifyAll()
def testDirectSendTypes(self):
"""Test the DIRECT_SEND_TYPES constant for sane values."""
self.assertTrue(type(self.mbc.DIRECT_SEND_TYPES) is list)
def testRequestSimple(self):
"""Test request with one body element."""
method = 'GET'
url = '/foo'
body = 'hello'
headers = {
'Content-Length': len(body),
'Host': self.hostname,
}
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'send')
client.httplib.HTTPConnection.request(
self.mbc,
method, url, headers=headers).AndReturn(None)
client.httplib.HTTPConnection.send(body).AndReturn(None)
self.mox.ReplayAll()
self.mbc.request(method, url, body=body)
self.mox.VerifyAll()
def testRequestMultiString(self):
"""Test request() with multiple body string elements."""
method = 'GET'
url = '/foo'
body = ['hello', 'there']
headers = {
'Content-Length': sum(map(len, body)),
'Host': self.hostname,
}
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'send')
client.httplib.HTTPConnection.request(
self.mbc,
method, url, headers=headers).AndReturn(None)
for s in body:
client.httplib.HTTPConnection.send(s).AndReturn(None)
self.mox.ReplayAll()
self.mbc.request(method, url, body=body)
self.mox.VerifyAll()
def testRequestMultiMixed(self):
"""Test request() with multiple mixed body elements."""
f_body = 'there'
f = self.mox.CreateMockAnything()
method = 'GET'
url = '/foo'
body = ['hello', f]
content_length = len(body[0]) + len(f_body)
headers = {
'Content-Length': content_length,
'Host': self.hostname,
}
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(client.httplib.HTTPConnection, 'send')
f.tell().AndReturn(0)
f.seek(0, client.SEEK_END).AndReturn(None)
f.tell().AndReturn(len(f_body))
f.seek(0, client.SEEK_SET).AndReturn(None)
client.httplib.HTTPConnection.request(
self.mbc,
method, url, headers=headers).AndReturn(None)
client.httplib.HTTPConnection.send(body[0]).AndReturn(None)
f.read(8192).AndReturn(f_body)
client.httplib.HTTPConnection.send(f_body).AndReturn(None)
f.read(8192).AndReturn('')
self.mox.ReplayAll()
self.mbc.request(method, url, body=body)
self.mox.VerifyAll()
def testSetCACertChain(self):
"""Test SetCACertChain()."""
self.mbc.SetCACertChain('foo')
self.assertEqual(self.mbc._ca_cert_chain, 'foo')
def testIsValidCert(self):
"""Test _IsValidCert()."""
store = self.mox.CreateMockAnything()
self.mox.ReplayAll()
self.assertEqual(1, self.mbc._IsValidCert(1, store))
self.mox.VerifyAll()
def testIsValidCertOkZero(self):
"""Test _IsValidCert()."""
store = self.mox.CreateMockAnything()
store.get_current_cert().AndReturn(store)
store.get_subject().AndReturn(store)
store.__str__().AndReturn('valid')
self.mox.ReplayAll()
self.assertEqual(0, self.mbc._IsValidCert(0, store))
self.mox.VerifyAll()
def testLoadCACertChain(self):
"""Test _LoadCACertChain()."""
ctx = self.mox.CreateMockAnything()
tf = self.mox.CreateMockAnything()
cert_chain = 'cert chain la la ..'
self.mbc._ca_cert_chain = cert_chain
self.mox.StubOutWithMock(client.tempfile, 'NamedTemporaryFile')
client.tempfile.NamedTemporaryFile().AndReturn(tf)
tf.write(cert_chain).AndReturn(None)
tf.flush().AndReturn(None)
tf.name = '/tmp/somefilename'
ctx.load_verify_locations(cafile=tf.name).AndReturn(1)
ctx.set_verify(
client.SSL.verify_peer | client.SSL.verify_fail_if_no_peer_cert,
depth=9,
callback=self.mbc._IsValidCert).AndReturn(None)
tf.close()
self.mox.ReplayAll()
self.mbc._LoadCACertChain(ctx)
self.mox.VerifyAll()
def testLoadCACertChainWhenLoadError(self):
"""Test _LoadCACertChain()."""
ctx = self.mox.CreateMockAnything()
tf = self.mox.CreateMockAnything()
cert_chain = 'cert chain la la ..'
self.mbc._ca_cert_chain = cert_chain
self.mox.StubOutWithMock(client.tempfile, 'NamedTemporaryFile')
client.tempfile.NamedTemporaryFile().AndReturn(tf)
tf.write(cert_chain).AndReturn(None)
tf.flush().AndReturn(None)
tf.name = '/tmp/somefilename'
ctx.load_verify_locations(cafile=tf.name).AndReturn(-1)
tf.close()
self.mox.ReplayAll()
self.assertRaises(
client.SimianClientError, self.mbc._LoadCACertChain, ctx)
self.mox.VerifyAll()
def testLoadCACertChainWhenNone(self):
"""Test _LoadCACertChain()."""
ctx = self.mox.CreateMockAnything()
self.mox.ReplayAll()
self.assertRaises(
client.SimianClientError, self.mbc._LoadCACertChain, ctx)
self.mox.VerifyAll()
def testConnect(self):
"""Test connect()."""
context = self.mox.CreateMockAnything()
conn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(client, 'SSL')
self.mox.StubOutWithMock(client.SSL, 'Context')
self.mox.StubOutWithMock(client.SSL, 'Connection')
self.mox.StubOutWithMock(self.mbc, '_LoadCACertChain')
self.mbc._ca_cert_chain = 'cert chain foo'
client.SSL.Context().AndReturn(context)
self.mbc._LoadCACertChain(context).AndReturn(None)
def __connect(address): # pylint: disable=g-bad-name
self.assertEqual(address, (self.mbc.host, self.mbc.port))
return None
client.SSL.Connection(context).AndReturn(conn)
conn.connect = __connect
self.mox.ReplayAll()
self.mbc.connect()
self.assertEqual(self.mbc.sock, conn)
self.mox.VerifyAll()
def testConnectWhenNoCACertChain(self):
"""Test connect()."""
context = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(client, 'SSL')
self.mox.StubOutWithMock(client.SSL, 'Context')
client.SSL.Context().AndReturn(context)
self.mox.ReplayAll()
self.assertRaises(client.SimianClientError, self.mbc.connect)
self.mox.VerifyAll()
class HttpsClientTest(mox.MoxTestBase):
"""Test HttpsClient class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.hostname = 'hostname'
self.port = None
self.client = client.HttpsClient(self.hostname)
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testInit(self):
"""Test __init__()."""
mock_lh = self.mox.CreateMockAnything()
self.stubs.Set(client.HttpsClient, '_LoadHost', mock_lh)
self.mox.StubOutWithMock(self.client, '_LoadHost')
mock_lh(self.hostname, None, None).AndReturn(None)
self.mox.ReplayAll()
i = client.HttpsClient(self.hostname)
self.assertEqual(i._progress_callback, None)
self.assertEqual(i._ca_cert_chain, None)
self.mox.VerifyAll()
def testLoadHost(self):
"""Test _LoadHost()."""
self.mox.ReplayAll()
self.client._LoadHost('host')
self.assertEqual(self.client.hostname, 'host')
self.assertEqual(self.client.port, None)
self.assertTrue(self.client.use_https)
self.client._LoadHost('host', 12345)
self.assertEqual(self.client.hostname, 'host')
self.assertEqual(self.client.port, 12345)
self.assertTrue(self.client.use_https)
self.client._LoadHost('https://tsoh:54321')
self.assertEqual(self.client.hostname, 'tsoh')
self.assertEqual(self.client.port, 54321)
self.assertTrue(self.client.use_https)
self.client._LoadHost('https://tsoh:54321', 9999)
self.assertEqual(self.client.hostname, 'tsoh')
self.assertEqual(self.client.port, 54321)
self.assertTrue(self.client.use_https)
self.client._LoadHost('foo.bar:5555')
self.assertEqual(self.client.hostname, 'foo.bar')
self.assertEqual(self.client.port, 5555)
self.assertTrue(self.client.use_https)
self.client._LoadHost('http://nonsecurehost')
self.assertEqual(self.client.hostname, 'nonsecurehost')
self.assertEqual(self.client.port, None)
self.assertFalse(self.client.use_https)
self.client._LoadHost('https://dev1.latest.%s' % client.SERVER_HOSTNAME)
self.assertEqual(
self.client.hostname, 'dev1.latest.%s' % client.SERVER_HOSTNAME)
self.assertEqual(self.client.port, None)
self.assertTrue(self.client.use_https)
self.client._LoadHost('http://dev2.latest.%s' % client.SERVER_HOSTNAME)
self.assertEqual(
self.client.hostname, 'dev2.latest.%s' % client.SERVER_HOSTNAME)
self.assertEqual(self.client.port, None)
self.assertFalse(self.client.use_https)
self.client._LoadHost('http://nonsecurehost:1234')
self.assertEqual(self.client.hostname, 'nonsecurehost')
self.assertEqual(self.client.port, 1234)
self.assertFalse(self.client.use_https)
self.client._LoadHost(u'http://unicodehost')
self.assertTrue(type(self.client.hostname) is str)
self.assertEqual(self.client.hostname, 'unicodehost')
self.client._LoadHost(u'http://unicodehost', proxy=u'http://evilproxy:9')
self.assertTrue(type(self.client.hostname) is str)
self.assertEqual(self.client.hostname, 'unicodehost')
self.assertTrue(type(self.client.proxy_hostname) is str)
self.assertEqual(self.client.proxy_hostname, 'evilproxy')
self.assertEqual(self.client.proxy_port, 9)
self.assertFalse(self.client.proxy_use_https)
self.client._LoadHost(u'http://unicodehost', proxy=u'https://evilprxssl:8')
self.assertTrue(type(self.client.hostname) is str)
self.assertEqual(self.client.hostname, 'unicodehost')
self.assertTrue(type(self.client.proxy_hostname) is str)
self.assertEqual(self.client.proxy_hostname, 'evilprxssl')
self.assertEqual(self.client.proxy_port, 8)
self.assertTrue(self.client.proxy_use_https)
self.mox.VerifyAll()
def testSetCACertChain(self):
"""Test SetCACertChain()."""
self.client.SetCACertChain('foo')
self.assertEqual(self.client._ca_cert_chain, 'foo')
def _TestConnect(self, test_client, hostname, port):
"""Test _Connect()."""
m = self.mox.CreateMockAnything()
# We stub this out this way because the parent class isn't a new-style
# object; it's an old-style Python class.
test_client._ca_cert_chain = 'cert chain'
use_https = (
(not test_client.proxy_hostname and test_client.use_https) or
(test_client.proxy_hostname and test_client.proxy_use_https))
if use_https:
self.stubs.Set(client, 'HTTPSMultiBodyConnection', m)
else:
self.stubs.Set(client, 'HTTPMultiBodyConnection', m)
m(hostname, port).AndReturn(m)
if use_https:
m.SetCACertChain('cert chain').AndReturn(None)
m.connect().AndReturn(None)
self.mox.ReplayAll()
test_client._Connect()
self.mox.VerifyAll()
def testConnect(self):
self._TestConnect(self.client, self.hostname, self.port)
def testConnectWithProxy(self):
test_client = client.HttpsClient(self.hostname, proxy='proxyhost:123')
self._TestConnect(test_client, 'proxyhost', 123)
def testGetResponseNoFile(self):
"""Test _GetResponse() storing body directly into response obj."""
headers = {'foo': 1}
status = 200
body = 'howdy sir'
body_len = len(body)
conn = self.mox.CreateMockAnything()
response = self.mox.CreateMockAnything()
conn.getresponse().AndReturn(response)
response.getheaders().AndReturn(headers)
response.status = status
response.read().AndReturn(body)
self.mox.ReplayAll()
r = self.client._GetResponse(conn)
self.assertEqual(r.headers, headers)
self.assertEqual(r.status, status)
self.assertEqual(r.body, body)
self.assertEqual(r.body_len, body_len)
self.mox.VerifyAll()
def testGetResponseOutputFile(self):
"""Test _GetResponse() sending the body to output_file."""
headers = {'foo': 1}
status = 200
body = 'howdy sir'
body_len = len(body)
conn = self.mox.CreateMockAnything()
response = self.mox.CreateMockAnything()
output_file = self.mox.CreateMockAnything()
conn.getresponse().AndReturn(response)
response.getheaders().AndReturn(headers)
response.status = status
response.read(8192).AndReturn(body)
output_file.write(body).AndReturn(None)
response.read(8192).AndReturn(None)
self.mox.ReplayAll()
r = self.client._GetResponse(conn, output_file=output_file)
self.assertEqual(r.headers, headers)
self.assertEqual(r.status, status)
self.assertEqual(r.body, None)
self.assertEqual(r.body_len, body_len)
self.mox.VerifyAll()
def testRequest(self):
"""Test _Request()."""
method = 'zGET'
url = u'/url'
body1 = {'encodeme': 1}
body1_encoded = 'encodeme:: 1'
body2 = 'leave this alone'
headers = {'User-Agent': 'gzip'}
conn = self.mox.CreateMockAnything()
self.stubs.Set(
client, 'urllib', self.mox.CreateMockAnything(client.urllib))
client.urllib.urlencode(body1).AndReturn(body1_encoded)
conn.request(
method, str(url), body=body1_encoded, headers=headers).AndReturn(None)
conn.request(
method, str(url), body=body2, headers=headers).AndReturn(None)
self.mox.ReplayAll()
self.client._Request(method, conn, url, body1, headers)
self.client._Request(method, conn, url, body2, headers)
self.mox.VerifyAll()
def _TestDoRequestResponse(self, test_client, url, req_url):
"""Test _DoRequestResponse()."""
method = 'zomg'
conn = self.mox.CreateMockAnything()
body = 'body'
headers = 'headers'
output_file = None
response = self.mox.CreateMockAnything()
response.status = 200
proxy_use_https = test_client.proxy_use_https
self.mox.StubOutWithMock(test_client, '_Connect')
self.mox.StubOutWithMock(test_client, '_Request')
self.mox.StubOutWithMock(test_client, '_GetResponse')
test_client._Connect().AndReturn(conn)
test_client._Request(
method, conn, req_url, body=body, headers=headers).AndReturn(None)
test_client._GetResponse(
conn, output_file=output_file).AndReturn(response)
test_client._Connect().AndRaise(client.httplib.HTTPException)
self.mox.ReplayAll()
self.assertEqual(
response,
test_client._DoRequestResponse(
method, url, body, headers, output_file))
self.assertRaises(
client.HTTPError,
test_client._DoRequestResponse,
method, url, body, headers, output_file)
self.mox.VerifyAll()
def testDoRequestResponse(self):
self._TestDoRequestResponse(self.client, '/url', '/url')
def testDoHttpRequestResponseWithHttpProxy(self):
"""Test a https request via a http proxy."""
test_client = client.HttpsClient(
'http://%s' % self.hostname, proxy='proxyhost:123')
req_url = 'http://' + self.hostname + '/url'
self._TestDoRequestResponse(test_client, '/url', req_url)
def testDoHttpsRequestResponseWithHttpProxy(self):
"""Test a https request via a http proxy."""
# default is https
test_client = client.HttpsClient(
self.hostname, proxy='http://proxyhost:124')
req_url = 'https://' + self.hostname + '/url'
self._TestDoRequestResponse(test_client, '/url', req_url)
def testDoHttpRequestResponseWithHttpsProxy(self):
"""Test a https request via a http proxy."""
test_client = client.HttpsClient(
'http://%s' % self.hostname, proxy='https://proxyhost:125')
req_url = 'http://' + self.hostname + '/url'
self._TestDoRequestResponse(test_client, '/url', req_url)
def testDoHttpsRequestResponseWithHttpsProxy(self):
"""Test a https request via a http proxy."""
# default is https
test_client = client.HttpsClient(
self.hostname, proxy='https://proxyhost:126')
req_url = 'https://' + self.hostname + '/url'
self._TestDoRequestResponse(test_client, '/url', req_url)
def testDoWithInvalidMethod(self):
"""Test Do() with invalid method."""
self.assertRaises(
NotImplementedError,
self.client.Do, 'badmethod', '/url')
def testDo(self):
"""Test Do() with correct arguments and no output_filename."""
method = 'GET'
url = 'url'
body = None
headers = None
output_file = None
output_filename = None
self.mox.StubOutWithMock(client.time, 'sleep')
self.mox.StubOutWithMock(self.client, '_DoRequestResponse')
# HTTP 500 should retry.
mock_response_fail = self.mox.CreateMockAnything()
mock_response_fail.status = 500
client.time.sleep(0).AndReturn(None)
self.client._DoRequestResponse(
method, url, body=body, headers={}, output_file=output_file).AndReturn(
mock_response_fail)
# HTTP 200 should succeed.
mock_response = self.mox.CreateMockAnything()
mock_response.status = 200
client.time.sleep(5).AndReturn(None)
self.client._DoRequestResponse(
method, url, body=body, headers={}, output_file=output_file).AndReturn(
mock_response)
self.mox.ReplayAll()
self.client.Do(method, url, body, headers, output_filename)
self.mox.VerifyAll()
def testDoWithRetryHttp500(self):
"""Test Do() with a HTTP 500, thus a retry."""
method = 'GET'
url = 'url'
body = None
headers = None
output_file = None
output_filename = None
self.mox.StubOutWithMock(client.time, 'sleep')
mock_response = self.mox.CreateMockAnything()
mock_response.status = 500
self.mox.StubOutWithMock(self.client, '_DoRequestResponse')
for i in xrange(0, client.DEFAULT_HTTP_ATTEMPTS):
client.time.sleep(i * 5).AndReturn(None)
self.client._DoRequestResponse(
method, url, body=body, headers={},
output_file=output_file).AndReturn(mock_response)
self.mox.ReplayAll()
r = self.client.Do(method, url, body, headers, output_filename)
self.mox.VerifyAll()
def testDoWithRetryHttpError(self):
"""Test Do() with a HTTP 500, thus a retry, but ending with HTTPError."""
method = 'GET'
url = 'url'
body = None
headers = None
output_file = None
output_filename = None
self.mox.StubOutWithMock(client.time, 'sleep')
self.mox.StubOutWithMock(self.client, '_DoRequestResponse')
for i in xrange(0, client.DEFAULT_HTTP_ATTEMPTS):
client.time.sleep(i * 5).AndReturn(None)
self.client._DoRequestResponse(
method, url, body=body, headers={},
output_file=output_file).AndRaise(client.HTTPError)
self.mox.ReplayAll()
self.assertRaises(
client.HTTPError,
self.client.Do,
method, url, body, headers, output_filename)
self.mox.VerifyAll()
def testDoWithOutputFilename(self):
"""Test Do() where an output_filename is supplied."""
method = 'GET'
url = 'url'
body = None
headers = {}
mock_open = self.mox.CreateMockAnything()
output_file = self.mox.CreateMockAnything()
output_filename = '/tmpfile'
mock_response = self.mox.CreateMockAnything()
mock_response.status = 200
self.mox.StubOutWithMock(self.client, '_DoRequestResponse')
mock_open(output_filename, 'w').AndReturn(output_file)
self.client._DoRequestResponse(
method, url, body=body, headers={}, output_file=output_file).AndReturn(
mock_response)
output_file.close().AndReturn(None)
self.mox.ReplayAll()
self.client.Do(
method, url, body, headers, output_filename, _open=mock_open)
self.mox.VerifyAll()
def testDoWithProxy(self):
"""Test Do() with a proxy specified."""
method = 'GET'
url = 'url'
proxy = 'proxyhost:123'
# Working case.
mock_response = self.mox.CreateMockAnything()
mock_response.status = 200
test_client = client.HttpsClient(self.hostname, proxy=proxy)
self.mox.StubOutWithMock(test_client, '_DoRequestResponse')
test_client._DoRequestResponse(
method, url, body=None, headers={}, output_file=None).AndReturn(
mock_response)
self.mox.ReplayAll()
test_client.Do(method, url)
self.mox.VerifyAll()
# No port case.
proxy = 'proxyhost'
self.assertRaises(
client.Error,
client.HttpsClient, self.hostname, proxy=proxy)
# Bad port case.
proxy = 'proxyhost:alpha'
self.assertRaises(
client.Error,
client.HttpsClient, self.hostname, proxy=proxy)
class HttpsAuthClientTest(mox.MoxTestBase):
"""Test HttpsAuthClient."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.hostname = 'hostname'
self.port = None
self.client = client.HttpsAuthClient(self.hostname)
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testInit(self):
"""Test __init__()."""
self.mox.StubOutWithMock(client.HttpsAuthClient, '_LoadRootCertChain')
client.HttpsAuthClient._LoadRootCertChain().AndReturn(None)
self.mox.ReplayAll()
c = client.HttpsAuthClient(self.hostname)
self.assertEqual(c._auth1, None)
self.assertEqual(c._cookie_token, None)
self.mox.VerifyAll()
def testPlatformSetup(self):
"""Test PlatformSetup()."""
self.mox.StubOutWithMock(client.platform, 'system')
client.platform.system().AndReturn('Darwin')
client.platform.system().AndReturn('other')
self.mox.ReplayAll()
self.client.facter_cache_path = 'x'
self.client._PlatformSetup()
self.assertEqual(
self.client.facter_cache_path, self.client.FACTER_CACHE_OSX_PATH)
self.client.facter_cache_path = 'x'
self.client._PlatformSetup()
self.assertEqual(
self.client.facter_cache_path, self.client.FACTER_CACHE_DEFAULT_PATH)
self.mox.VerifyAll()
def testGetFacter(self):
"""Test GetFacter()."""
st_dt = client.datetime.datetime.now()
self.client.facter_cache_path = '/x'
mock_open = self.mox.CreateMockAnything()
mock_file = self.mox.CreateMockAnything()
stat = self.mox.CreateMockAnything()
mock_dt = self.mox.CreateMockAnything()
self.stubs.Set(client.datetime, 'datetime', mock_dt)
self.mox.StubOutWithMock(client.os, 'stat')
self.mox.StubOutWithMock(client.os, 'geteuid')
self.mox.StubOutWithMock(client.os.path, 'isfile')
client.os.path.isfile(self.client.facter_cache_path).AndReturn(True)
stat.st_uid = 0
stat.st_mtime = int(st_dt.strftime('%s'))
facter = {'foo': 'bar', 'one': '1'}
lines = [
'foo => bar',
'one => 1',
'I_am_invalid',
]
client.os.stat(self.client.facter_cache_path).AndReturn(stat)
client.os.geteuid().AndReturn(0)
client.os.geteuid().AndReturn(0)
mock_dt.fromtimestamp(stat.st_mtime).AndReturn(st_dt)
mock_open(self.client.facter_cache_path, 'r').AndReturn(mock_file)
for line in lines:
mock_file.readline().AndReturn(line)
mock_file.readline().AndReturn(None)
mock_file.close().AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(facter, self.client.GetFacter(open_fn=mock_open))
self.mox.VerifyAll()
def testGetFacterWhenInsecureFileForRoot(self):
"""Test GetFacter()."""
self.client.facter_cache_path = '/x'
mock_open = self.mox.CreateMockAnything()
stat = self.mox.CreateMockAnything()
mock_dt = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(client.os, 'stat')
self.mox.StubOutWithMock(client.os, 'geteuid')
self.mox.StubOutWithMock(client.os.path, 'isfile')
client.os.path.isfile(self.client.facter_cache_path).AndReturn(True)
stat.st_uid = 100
client.os.stat(self.client.facter_cache_path).AndReturn(stat)
client.os.geteuid().AndReturn(0)
self.mox.ReplayAll()
self.assertEqual({}, self.client.GetFacter(open_fn=mock_open))
self.mox.VerifyAll()
def testGetFacterWhenInsecureFileForNonRoot(self):
"""Test GetFacter()."""
self.client.facter_cache_path = '/x'
mock_open = self.mox.CreateMockAnything()
stat = self.mox.CreateMockAnything()
mock_dt = self.mox.CreateMockAnything()
self.stubs.Set(client.datetime, 'datetime', mock_dt)
self.mox.StubOutWithMock(client.os, 'stat')
self.mox.StubOutWithMock(client.os, 'geteuid')
self.mox.StubOutWithMock(client.os.path, 'isfile')
client.os.path.isfile(self.client.facter_cache_path).AndReturn(True)
stat.st_uid = 100
client.os.stat(self.client.facter_cache_path).AndReturn(stat)
client.os.geteuid().AndReturn(200)
client.os.geteuid().AndReturn(200)
client.os.geteuid().AndReturn(200)
self.mox.ReplayAll()
self.assertEqual({}, self.client.GetFacter(open_fn=mock_open))
self.mox.VerifyAll()
def testGetFacterWhenCacheDoesNotExist(self):
"""Test GetFacter() with a nonexistent cache file."""
self.client.facter_cache_path = '/x'
self.mox.StubOutWithMock(client.os.path, 'isfile')
client.os.path.isfile(self.client.facter_cache_path).AndReturn(False)
self.mox.ReplayAll()
self.assertEqual({}, self.client.GetFacter())
self.mox.VerifyAll()
def testGetFacterWhenCachePathIsNone(self):
"""Test GetFacter() with facter_cache_path is None."""
self.client.facter_cache_path = None
self.mox.ReplayAll()
self.assertEqual({}, self.client.GetFacter())
self.mox.VerifyAll()
def testDoUAuth(self):
"""Test DoUAuth()."""
self.mox.StubOutWithMock(client.os, 'isatty')
self.mox.StubOutWithMock(client, 'UAuth')
mock_ua = self.mox.CreateMockAnything()
client.os.isatty(client.sys.stdin.fileno()).AndReturn(True)
client.UAuth(
hostname=self.client.netloc, interactive_user=True).AndReturn(mock_ua)
mock_ua.SetCACertChain(self.client._ca_cert_chain).AndReturn(None)
mock_ua.Login().AndReturn('token')
self.mox.ReplayAll()
self.client.DoUAuth()
self.assertEqual('token', self.client._cookie_token)
self.mox.VerifyAll()
def testDoUAuthWhenNoToken(self):
"""Test DoUAuth() when no token returned."""
self.mox.StubOutWithMock(client.os, 'isatty')
self.mox.StubOutWithMock(client, 'UAuth')
mock_ua = self.mox.CreateMockAnything()
client.os.isatty(client.sys.stdin.fileno()).AndReturn(True)
client.UAuth(
hostname=self.client.netloc, interactive_user=True).AndReturn(mock_ua)
mock_ua.SetCACertChain(self.client._ca_cert_chain).AndReturn(None)
mock_ua.Login().AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(client.SimianClientError, self.client.DoUAuth)
self.mox.VerifyAll()
def testDoUserAuth(self):
"""Test DoUserAuth()."""
self.mox.StubOutWithMock(self.client, 'DoUAuth')
self.client.DoUAuth().AndReturn(None)
self.mox.ReplayAll()
self.client.DoUserAuth()
self.mox.VerifyAll()
def testDoSimianAuth(self):
"""Test DoSimianAuth()."""
# TODO(user)
class UAuthTest(mox.MoxTestBase):
"""Test UAuth."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.hostname = 'hostname'
self.client = client.UAuth(self.hostname, interactive_user=False)
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def _MockAuthFunction(self):
self.client._AuthFunction = lambda: (self.username, self.password)
def testInit(self):
"""Test __init__()."""
self.assertEqual(self.client.hostname, self.hostname)
self.assertEqual(self.client.interactive_user, False)
def testAuthFunction(self):
"""Test _AuthFunction()."""
self.assertRaises(client.SimianClientError, self.client._AuthFunction)
self.client.interactive_user = True
self.mox.StubOutWithMock(client.getpass, 'getuser')
self.mox.StubOutWithMock(client.getpass, 'getpass')
client.getpass.getuser().AndReturn('joe')
email = 'joe@%s' % client.AUTH_DOMAIN
client.getpass.getpass('%s password: ' % email).AndReturn('pass')
self.mox.ReplayAll()
self.assertEqual((email, 'pass'), self.client._AuthFunction())
self.mox.VerifyAll()
def testLogin(self):
"""Test Login()."""
self.mox.StubOutWithMock(client, 'AppEngineHttpRpcServer')
self.mox.StubOutWithMock(client.auth_client, 'AuthSimianClient')
mock_s = self.mox.CreateMockAnything()
response = 'response'
mock_auth1 = self.mox.CreateMockAnything()
self.client._AuthFunction = self._MockAuthFunction
client.AppEngineHttpRpcServer(
self.hostname, self._MockAuthFunction, None, 'ah',
save_cookies=True, secure=True).AndReturn(mock_s)
mock_s.Send('/uauth').AndReturn(response)
client.auth_client.AuthSimianClient().AndReturn(mock_auth1)
mock_auth1.LoadCaParameters(client.settings).AndReturn(None)
mock_auth1.Input(t=response)
mock_auth1.AuthStateOK().AndReturn(True)
mock_s.cookie_jar = [self.mox.CreateMockAnything()]
mock_s.cookie_jar[0].domain = self.hostname
mock_s.cookie_jar[0].name = response
mock_s.cookie_jar[0].secure = True
mock_s.cookie_jar[0].value = 'value'
cookie = '%s=%s; %s; httponly;' % (response, 'value', 'secure')
self.mox.ReplayAll()
self.assertEqual(self.client.Login(), cookie)
self.mox.VerifyAll()
def testLoginWhenNoOutput(self):
"""Test Login()."""
self.mox.StubOutWithMock(client, 'AppEngineHttpRpcServer')
self.mox.StubOutWithMock(client.auth_client, 'AuthSimianClient')
mock_s = self.mox.CreateMockAnything()
response = 'response'
mock_auth1 = self.mox.CreateMockAnything()
self.client._AuthFunction = self._MockAuthFunction
client.AppEngineHttpRpcServer(
self.hostname, self._MockAuthFunction, None, 'ah',
save_cookies=True, secure=True).AndReturn(mock_s)
mock_s.Send('/uauth').AndReturn(response)
client.auth_client.AuthSimianClient().AndReturn(mock_auth1)
mock_auth1.LoadCaParameters(client.settings).AndReturn(None)
mock_auth1.Input(t=response)
mock_auth1.AuthStateOK().AndReturn(True)
mock_s.cookie_jar = [self.mox.CreateMockAnything()]
mock_s.cookie_jar[0].domain = self.hostname
mock_s.cookie_jar[0].name = 'not-%s' % response
mock_s.cookie_jar[0].secure = True
mock_s.cookie_jar[0].value = 'value'
self.mox.ReplayAll()
self.assertRaises(client.SimianClientError, self.client.Login)
self.mox.VerifyAll()
class SimianClientTest(mox.MoxTestBase):
"""Test SimianClient class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.hostname = 'hostname'
self.port = None
self.client = client.SimianClient(self.hostname)
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testInitWithoutHostname(self):
"""Test __init__() without a hostname passed."""
user = 'foouser'
self.mox.StubOutWithMock(
client.SimianClient, '_GetLoggedOnUser', self.mox.CreateMockAnything())
client.SimianClient._GetLoggedOnUser().AndReturn(user)
self.mox.ReplayAll()
clienttmp = client.SimianClient()
self.assertEqual(clienttmp.hostname, client.SERVER_HOSTNAME)
self.assertEqual(clienttmp._user, user)
self.mox.VerifyAll()
def testInitWithHostname(self):
"""Test __init__() with a hostname passed."""
user = 'foouser'
self.mox.StubOutWithMock(
client.SimianClient, '_GetLoggedOnUser', self.mox.CreateMockAnything())
client.SimianClient._GetLoggedOnUser().AndReturn(user)
self.mox.ReplayAll()
clienttmp = client.SimianClient('foo')
self.assertEqual(clienttmp.hostname, 'foo')
self.assertEqual(clienttmp._user, user)
self.mox.VerifyAll()
def testInitAsRoot(self):
"""Test __init__() with a hostname passed."""
self.mox.StubOutWithMock(
client.SimianClient, '_GetLoggedOnUser', self.mox.CreateMockAnything())
client.SimianClient._GetLoggedOnUser().AndReturn('root')
self.mox.ReplayAll()
self.assertRaises(client.SimianClientError, client.SimianClient)
self.mox.VerifyAll()
def testIsDefaultHostClient(self):
"""Test IsDefaultHostClient()."""
self.client._default_hostname = 'foo'
self.assertEqual(self.client.IsDefaultHostClient(), 'foo')
def testSimianRequest(self):
"""Test _SimianRequest()."""
method = 'zGET'
url = '/url'
headers = {'foo': 'bar'}
output_filename = None
good_response = client.Response(status=200, body='hello there')
self.mox.StubOutWithMock(self.client, 'Do')
self.client.Do(
method, url, body=None, headers=headers,
output_filename=output_filename).AndReturn(good_response)
self.mox.ReplayAll()
self.assertEqual(
good_response.body,
self.client._SimianRequest(method, url, headers=headers))
self.mox.VerifyAll()
def testSimianRequestWithError(self):
"""Test _SimianRequest() with an error status returned."""
method = 'zGET'
url = '/url'
headers = {'foo': 'bar'}
output_filename = None
error_response = client.Response(status=401, body='fooerror')
self.mox.StubOutWithMock(self.client, 'Do')
self.client.Do(
method, url, body=None, headers=headers,
output_filename=output_filename).AndReturn(error_response)
self.mox.ReplayAll()
self.assertRaises(
client.SimianServerError,
self.client._SimianRequest, method, url, headers=headers)
self.mox.VerifyAll()
def GenericStubTestAndReturn(
self,
method,
method_return,
method_args,
stub_method_name, stub_method_return, *stub_args, **stub_kwargs):
"""Helper test method.
TODO(user): Move to common.test.
Args:
method: method, to invoke in the test
method_return: any, value to expect from method
method_args: list, arguments to send to method during test
stub_method_name: str, method name to stub out in SimianClient class
stub_method_return: any, value to return from stubbed method call
stub_args: list, args to expect when calling stub_method_name
stub_kwargs: dict, kwargs to expect when calling stub_method_name
"""
self.mox.StubOutWithMock(self.client, stub_method_name)
getattr(self.client, stub_method_name)(
*stub_args, **stub_kwargs).AndReturn(stub_method_return)
self.mox.ReplayAll()
got_rv = method(*method_args)
self.assertEqual(got_rv, method_return)
self.mox.VerifyAll()
def GenericStubTest(
self,
method, method_args,
stub_method_name, *stub_args, **stub_kwargs):
"""Helper test method.
TODO(user): Move to common.test.
Args:
method: method, to invoke in the test
method_args: list, arguments to send to method during test
stub_method_name: str, method name to stub out in SimianClient class
stub_args: list, args to expect when calling stub_method_name
stub_kwargs: dict, kwargs to expect when calling stub_method_name
Returns:
string, 'returnval'
"""
rv = 'returnval'
return self.GenericStubTestAndReturn(
method, rv, method_args,
stub_method_name, rv, *stub_args, **stub_kwargs)
def testGetCatalog(self):
"""Test GetCatalog()."""
name = 'name'
self.GenericStubTest(
self.client.GetCatalog, [name],
'_SimianRequest', 'GET', '/catalog/%s' % name)
def testGetManifest(self):
"""Test GetManifest()."""
name = 'name'
self.GenericStubTest(
self.client.GetManifest, [name],
'_SimianRequest', 'GET', '/manifest/%s' % name)
def testGetPackage(self):
"""Test GetPackage()."""
name = 'name'
self.GenericStubTest(
self.client.GetPackage, [name],
'_SimianRequest', 'GET', '/pkgs/%s' % name, output_filename=None)
def testSimianFormUpload(self):
"""Test _SimianFormUpload()."""
user = 'foouser'
fqdn_user = '%s@%s' % (user, client.AUTH_DOMAIN)
name = u'hebrew \u05d7'
name_utf8_str = 'hebrew \xd7\x97'
params = {'pkginfo': u'fooinfo \u05d7'}
updated_params = {
'name': name_utf8_str, 'pkginfo': 'fooinfo \xd7\x97', 'user': fqdn_user}
self.client._user = user
mock_response = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.client, 'DoMultipart')
self.client.DoMultipart(
'', updated_params, name_utf8_str, input_filename='',
input_file='').AndReturn(mock_response)
mock_response.IsError().AndReturn(False)
self.mox.ReplayAll()
self.client._SimianFormUpload('', name, params, '', '')
self.mox.VerifyAll()
def testPutPackage(self):
"""Test PutPackage()."""
filename = 'name.dmg'
params = {'pkginfo': 'xml'}
post_path = '/_ah/upload-here.cgi'
post_url = 'http://%s%s' % (self.hostname, post_path)
redirect_path = '/ok?foo=bar'
redirect_url = 'http://%s%s' % (self.hostname, redirect_path)
input_file = 'input_file'
result = 'result'
mock_response = self.mox.CreateMockAnything()
self.assertRaises(client.Error, self.client.PutPackage, filename, params)
self.mox.StubOutWithMock(self.client, '_SimianRequest')
self.mox.StubOutWithMock(self.client, '_SimianFormUpload')
self.client._SimianRequest('GET', '/uploadpkg').AndReturn(post_url)
self.client._SimianFormUpload(
post_path, filename, params,
input_file=input_file, input_filename=None).AndReturn(
mock_response)
mock_response.headers = {'location': redirect_url}
mock_response.IsRedirect().AndReturn(True)
self.client._SimianRequest('GET', redirect_path).AndReturn(result)
self.mox.ReplayAll()
self.assertRaises(client.Error, self.client.PutPackage, filename, params)
self.assertEqual(
result,
self.client.PutPackage(filename, params, input_file=input_file))
self.mox.VerifyAll()
def testPutPackageWhenNotRedirect(self):
"""Test PutPackage() where a redirect was not received.
Lack of redirect indicates a blob upload failure.
"""
filename = 'name.dmg'
params = {'pkginfo': 'xml'}
post_path = '/_ah/upload-here.cgi'
post_url = 'http://%s%s' % (self.hostname, post_path)
redirect_path = '/ok?foo=bar'
redirect_url = 'http://%s%s' % (self.hostname, redirect_path)
input_file = 'input_file'
mock_response = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.client, '_SimianRequest')
self.mox.StubOutWithMock(self.client, '_SimianFormUpload')
self.client._SimianRequest('GET', '/uploadpkg').AndReturn(post_url)
self.client._SimianFormUpload(
post_path, filename, params,
input_file=input_file, input_filename=None).AndReturn(
mock_response)
mock_response.headers = {'location': redirect_url}
mock_response.IsRedirect().AndReturn(False)
mock_response.__str__().AndReturn('error response')
self.mox.ReplayAll()
self.assertRaises(
client.SimianClientError,
self.client.PutPackage,
filename, params, input_file=input_file)
self.mox.VerifyAll()
def testPutPackageWithDifferentUploadHost(self):
"""Test PutPackage() where a different upload host is returned."""
url = 'https://foohost/foopath'
self.client.netloc = 'something different'
self.mox.StubOutWithMock(self.client, '_SimianRequest')
self.client._SimianRequest('GET', '/uploadpkg').AndReturn(url)
self.mox.ReplayAll()
self.assertRaises(
client.SimianClientError,
self.client.PutPackage, '', '', 'anyfile')
self.mox.VerifyAll()
def testGetPackageInfo(self):
"""Test GetPackageInfo()."""
filename = 'name.dmg'
response = self.mox.CreateMockAnything()
response.body = 'hello'
self.GenericStubTestAndReturn(
self.client.GetPackageInfo,
'hello',
[filename],
'_SimianRequest',
response,
'GET', '/pkgsinfo/%s' % filename, full_response=True)
def testGetPackageInfoWhenHash(self):
"""Test GetPackageInfo()."""
filename = 'name.dmg'
response = self.mox.CreateMockAnything()
response.body = 'body'
response.headers = {'x-pkgsinfo-hash': 'hash'}
self.GenericStubTestAndReturn(
self.client.GetPackageInfo, ('hash', 'body'),
[filename, True],
'_SimianRequest',
response,
'GET', '/pkgsinfo/%s?hash=1' % filename, full_response=True)
def testPutPackageInfo(self):
"""Test PutPackageInfo()."""
filename = 'some pkg.dmg'
quoted_name = 'some%20pkg.dmg'
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
pkginfo = u'<plist>etc\u2665</plist>'
pkginfo_utf8 = pkginfo.encode('utf-8')
got_hash = 'hash'
url = '/pkgsinfo/%s?catalogs=%s&manifests=%s&install_types=%s&hash=%s' % (
quoted_name, ','.join(catalogs), ','.join(manifests),
','.join(install_types), got_hash)
self.GenericStubTest(
self.client.PutPackageInfo,
[filename, pkginfo, catalogs, manifests, install_types, got_hash],
'_SimianRequest', 'PUT', url, pkginfo_utf8)
def testPutPackageInfoWhenSetNoManifests(self):
"""Test PutPackageInfo()."""
filename = 'some pkg.dmg'
quoted_name = 'some%20pkg.dmg'
catalogs = ['catalog1', 'catalog2']
manifests = ''
install_types = ['type1', 'type2']
pkginfo = u'<plist>etc\u2665</plist>'
pkginfo_utf8 = pkginfo.encode('utf-8')
got_hash = 'hash'
url = '/pkgsinfo/%s?catalogs=%s&manifests=%s&install_types=%s&hash=%s' % (
quoted_name, ','.join(catalogs), manifests,
','.join(install_types), got_hash)
self.GenericStubTest(
self.client.PutPackageInfo,
[filename, pkginfo, catalogs, manifests, install_types, got_hash],
'_SimianRequest', 'PUT', url, pkginfo_utf8)
def testDeletePackage(self):
"""Test DeletePackage()."""
filename = 'foo'
self.GenericStubTest(
self.client.DeletePackage,
[filename],
'_SimianRequest', 'POST', '/deletepkg', {'filename': filename})
def testDownloadPackage(self):
"""Test DownloadPackage()."""
filename = 'foo'
self.GenericStubTest(
self.client.DownloadPackage,
[filename],
'_SimianRequest', 'GET',
'/pkgs/%s' % filename, output_filename=filename)
def testPostReport(self):
"""Test PostReport()."""
report_type = 'foo'
params = {'bar': 1}
url = '/reports'
body = '_report_type=%s&%s' % (
report_type,
client.urllib.urlencode(params, doseq=True))
self.GenericStubTest(
self.client.PostReport, [report_type, params],
'_SimianRequest', 'POST', url, body)
def testPostReportWhenFeedback(self):
"""Test PostReport()."""
report_type = 'foo'
params = {'bar': 1}
url = '/reports'
body = '_report_type=%s&%s&_feedback=1' % (
report_type,
client.urllib.urlencode(params, doseq=True))
self.GenericStubTest(
self.client.PostReport, [report_type, params, True],
'_SimianRequest', 'POST', url, body)
def testPostReportBody(self):
"""Test PostReportBody()."""
url = '/reports'
body = 'foo'
self.GenericStubTest(
self.client.PostReportBody, [body],
'_SimianRequest', 'POST', url, body)
def testPostReportBodyWhenFeedback(self):
"""Test PostReportBody()."""
url = '/reports'
body = 'foo'
body_with_feedback = 'foo&_feedback=1'
self.GenericStubTest(
self.client.PostReportBody, [body, True],
'_SimianRequest', 'POST', url, body_with_feedback)
def testUploadFile(self):
"""Test UploadFile()."""
self.mox.StubOutWithMock(client.os.path, 'isfile')
self.mox.StubOutWithMock(self.client, 'Do')
file_type = 'log'
file_name = 'file.log'
file_path = 'path/to/' + file_name
url = '/uploadfile/%s/%s' % (file_type, file_name)
mock_open = self.mox.CreateMockAnything()
mock_open(file_path, 'r').AndReturn(mock_open)
client.os.path.isfile(file_path).AndReturn(True)
self.client.Do('PUT', url, mock_open).AndReturn(None)
mock_open.close().AndReturn(None)
self.mox.ReplayAll()
self.client.UploadFile(file_path, file_type, _open=mock_open)
self.mox.VerifyAll()
def testUploadFileWhenLogNotFound(self):
"""Test UploadFile() when the file is not found."""
self.mox.StubOutWithMock(client.os.path, 'isfile')
self.mox.StubOutWithMock(client.logging, 'error')
file_path = 'path/to/file.log'
client.os.path.isfile(file_path).AndReturn(False)
client.logging.error('UploadFile file not found: %s', file_path)
self.mox.ReplayAll()
self.client.UploadFile(file_path, 'foo-file-type')
self.mox.VerifyAll()
def testIsPackageUploadNecessary(self):
"""Test _IsPackageUploadNecessary()."""
filename = 'filename'
pkginfo = 'pkginfo'
self.assertTrue(self.client._IsPackageUploadNecessary(filename, pkginfo))
def testUploadPackageWhenUploadNecessary(self):
"""Test UploadPackage()."""
file_path = '/path/to/filename.dmg'
filename = 'filename.dmg'
description = 'foo package description!!'
display_name = 'Foo Package'
pkginfo = 'pkginfo'
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['managed_installs', 'managed_updates']
params = {
'pkginfo': pkginfo,
'catalogs': ','.join(catalogs),
'manifests': ','.join(manifests),
'install_types': ','.join(install_types),
}
self.mox.StubOutWithMock(
self.client, 'PutPackage', self.mox.CreateMockAnything())
self.mox.StubOutWithMock(self.client, '_IsPackageUploadNecessary')
self.client._IsPackageUploadNecessary(file_path, pkginfo).AndReturn(True)
self.client.PutPackage(
filename, params, input_filename=file_path).AndReturn('Success')
self.mox.ReplayAll()
r = self.client.UploadPackage(
file_path, description, display_name, catalogs, manifests,
install_types, pkginfo)
self.assertEqual(r[0], 'Success')
self.assertEqual(r[1], filename)
self.assertEqual(r[2], catalogs)
self.assertEqual(r[3], manifests)
self.assertEqual(len(r), 4)
self.mox.VerifyAll()
def testUploadPackageWhenUploadNotNecessary(self):
"""Test UploadPackage()."""
file_path = '/path/to/filename.dmg'
filename = 'filename.dmg'
description = 'foo package description!!'
display_name = 'Foo Package'
pkginfo = 'pkginfo'
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['managed_installs', 'managed_updates']
self.mox.StubOutWithMock(
self.client, 'PutPackageInfo', self.mox.CreateMockAnything())
self.mox.StubOutWithMock(self.client, '_IsPackageUploadNecessary')
self.client._IsPackageUploadNecessary(file_path, pkginfo).AndReturn(False)
self.client.PutPackageInfo(
filename, pkginfo, catalogs, manifests, install_types).AndReturn(
'Success')
self.mox.ReplayAll()
r = self.client.UploadPackage(
file_path, description, display_name, catalogs, manifests,
install_types, pkginfo)
self.assertEqual(r[0], 'Success')
self.assertEqual(r[1], filename)
self.assertEqual(r[2], catalogs)
self.assertEqual(r[3], manifests)
self.assertEqual(len(r), 4)
self.mox.VerifyAll()
class SimianAuthClientTest(mox.MoxTestBase):
"""Test SimianAuthClient class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.pac = client.SimianAuthClient()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testGetAuthToken(self):
"""Test GetAuthToken()."""
self.mox.StubOutWithMock(self.pac, 'DoSimianAuth')
self.pac._cookie_token = 'token'
self.pac.DoSimianAuth().AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(self.pac.GetAuthToken(), 'token')
self.mox.VerifyAll()
def testLogoutAuthToken(self):
"""Test LogoutAuthToken()."""
url = '/auth?logout=True'
self.mox.StubOutWithMock(self.pac, '_SimianRequest')
self.pac._SimianRequest('GET', url).AndReturn('ok')
self.mox.ReplayAll()
self.assertTrue(self.pac.LogoutAuthToken())
self.mox.VerifyAll()
def testLogoutAuthTokenWhenFail(self):
"""Test LogoutAuthToken()."""
url = '/auth?logout=True'
self.mox.StubOutWithMock(self.pac, '_SimianRequest')
self.pac._SimianRequest('GET', url).AndRaise(client.SimianServerError)
self.mox.ReplayAll()
self.assertFalse(self.pac.LogoutAuthToken())
self.mox.VerifyAll()
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
|
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.test import TestCase
from guardian.shortcuts import assign_perm
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_objects_for_group
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import remove_perm
from guardian.testapp.models import Mixed, ReverseMixed
from guardian.testapp.models import Project
from guardian.testapp.models import ProjectGroupObjectPermission
from guardian.testapp.models import ProjectUserObjectPermission
from guardian.testapp.tests.conf import skipUnlessTestApp
User = get_user_model()
@skipUnlessTestApp
class TestDirectUserPermissions(TestCase):
def setUp(self):
self.joe = User.objects.create_user('joe', 'joe@example.com', 'foobar')
self.project = Project.objects.create(name='Foobar')
def get_perm(self, codename):
filters = {'content_type__app_label': 'testapp', 'codename': codename}
return Permission.objects.get(**filters)
def test_after_perm_is_created_without_shortcut(self):
perm = self.get_perm('add_project')
# We deliberately avoid the assign_perm shortcut here: if the generic user
# object permissions model were in use, assign_perm would still succeed and
# this test would not be able to detect the problem.
ProjectUserObjectPermission.objects.create(
user=self.joe,
permission=perm,
content_object=self.project,
)
self.assertTrue(self.joe.has_perm('add_project', self.project))
def test_assign_perm(self):
assign_perm('add_project', self.joe, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'user': self.joe,
}
result = ProjectUserObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
def test_remove_perm(self):
assign_perm('add_project', self.joe, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'user': self.joe,
}
result = ProjectUserObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
remove_perm('add_project', self.joe, self.project)
result = ProjectUserObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 0)
def test_get_users_with_perms(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
assign_perm('add_project', self.joe, self.project)
assign_perm('change_project', self.joe, self.project)
assign_perm('change_project', jane, self.project)
self.assertEqual(get_users_with_perms(self.project, attach_perms=True),
{
self.joe: ['add_project', 'change_project'],
jane: ['change_project'],
})
def test_get_users_with_perms_plus_groups(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
group = Group.objects.create(name='devs')
self.joe.groups.add(group)
assign_perm('add_project', self.joe, self.project)
assign_perm('change_project', group, self.project)
assign_perm('change_project', jane, self.project)
self.assertEqual(get_users_with_perms(self.project, attach_perms=True),
{
self.joe: ['add_project', 'change_project'],
jane: ['change_project'],
})
def test_get_objects_for_user(self):
foo = Project.objects.create(name='foo')
bar = Project.objects.create(name='bar')
assign_perm('add_project', self.joe, foo)
assign_perm('add_project', self.joe, bar)
assign_perm('change_project', self.joe, bar)
result = get_objects_for_user(self.joe, 'testapp.add_project')
self.assertEqual(sorted(p.pk for p in result),
sorted([foo.pk, bar.pk]))
def test_get_all_permissions(self):
foo = Project.objects.create(name='foo')
assign_perm('add_project', self.joe, foo)
assign_perm('change_project', self.joe, foo)
result = self.joe.get_all_permissions(foo)
self.assertEqual(result, set(('add_project', 'change_project')))
def test_get_all_permissions_no_object(self):
foo = Project.objects.create(name='foo')
assign_perm('add_project', self.joe, foo)
assign_perm('change_project', self.joe, foo)
result = self.joe.get_all_permissions()
self.assertEqual(result, set())
@skipUnlessTestApp
class TestDirectGroupPermissions(TestCase):
def setUp(self):
self.joe = User.objects.create_user('joe', 'joe@example.com', 'foobar')
self.group = Group.objects.create(name='admins')
self.joe.groups.add(self.group)
self.project = Project.objects.create(name='Foobar')
def get_perm(self, codename):
filters = {'content_type__app_label': 'testapp', 'codename': codename}
return Permission.objects.get(**filters)
def test_after_perm_is_created_without_shortcut(self):
perm = self.get_perm('add_project')
# We deliberately avoid the assign_perm shortcut here: if the generic group
# object permissions model were in use, assign_perm would still succeed and
# this test would not be able to detect the problem.
ProjectGroupObjectPermission.objects.create(
group=self.group,
permission=perm,
content_object=self.project,
)
self.assertTrue(self.joe.has_perm('add_project', self.project))
def test_assign_perm(self):
assign_perm('add_project', self.group, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'group': self.group,
}
result = ProjectGroupObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
def test_remove_perm(self):
assign_perm('add_project', self.group, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'group': self.group,
}
result = ProjectGroupObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
remove_perm('add_project', self.group, self.project)
result = ProjectGroupObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 0)
def test_get_groups_with_perms(self):
Group.objects.create(name='managers')
devs = Group.objects.create(name='devs')
assign_perm('add_project', self.group, self.project)
assign_perm('change_project', self.group, self.project)
assign_perm('change_project', devs, self.project)
self.assertEqual(get_groups_with_perms(self.project, attach_perms=True),
{
self.group: ['add_project', 'change_project'],
devs: ['change_project'],
})
def test_get_groups_with_perms_doesnt_spawn_extra_queries_for_more_groups_with_perms(self):
Group.objects.create(name='managers')
devs = Group.objects.create(name='devs')
devs1 = Group.objects.create(name='devs1')
devs2 = Group.objects.create(name='devs2')
devs3 = Group.objects.create(name='devs3')
devs4 = Group.objects.create(name='devs4')
devs5 = Group.objects.create(name='devs5')
assign_perm('add_project', self.group, self.project)
assign_perm('change_project', self.group, self.project)
for group in [devs, devs1, devs2, devs3, devs4, devs5]:
assign_perm('add_project', group, self.project)
assign_perm('change_project', group, self.project)
with self.assertNumQueries(3):
result = get_groups_with_perms(self.project, attach_perms=True)
self.assertEqual(result,
{
self.group: ['add_project', 'change_project'],
devs: ['add_project', 'change_project'],
devs1: ['add_project', 'change_project'],
devs2: ['add_project', 'change_project'],
devs3: ['add_project', 'change_project'],
devs4: ['add_project', 'change_project'],
devs5: ['add_project', 'change_project'],
})
def test_get_objects_for_group(self):
foo = Project.objects.create(name='foo')
bar = Project.objects.create(name='bar')
assign_perm('add_project', self.group, foo)
assign_perm('add_project', self.group, bar)
assign_perm('change_project', self.group, bar)
result = get_objects_for_group(self.group, 'testapp.add_project')
self.assertEqual(sorted(p.pk for p in result),
sorted([foo.pk, bar.pk]))
@skipUnlessTestApp
class TestMixedDirectAndGenericObjectPermission(TestCase):
def setUp(self):
self.joe = User.objects.create_user('joe', 'joe@example.com', 'foobar')
self.group = Group.objects.create(name='admins')
self.joe.groups.add(self.group)
self.mixed = Mixed.objects.create(name='Foobar')
self.reverse_mixed = ReverseMixed.objects.create(name='Foobar')
def test_get_users_with_perms_plus_groups(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
group = Group.objects.create(name='devs')
self.joe.groups.add(group)
assign_perm('add_mixed', self.joe, self.mixed)
assign_perm('change_mixed', group, self.mixed)
assign_perm('change_mixed', jane, self.mixed)
self.assertEqual(get_users_with_perms(self.mixed, attach_perms=True),
{
self.joe: ['add_mixed', 'change_mixed'],
jane: ['change_mixed'],
})
result = get_objects_for_user(self.joe, 'testapp.add_mixed')
self.assertEqual(sorted(p.pk for p in result),
sorted([self.mixed.pk]))
def test_get_users_with_perms_plus_groups_reverse_mixed(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
group = Group.objects.create(name='devs')
self.joe.groups.add(group)
assign_perm('add_reversemixed', self.joe, self.reverse_mixed)
assign_perm('change_reversemixed', group, self.reverse_mixed)
assign_perm('change_reversemixed', jane, self.reverse_mixed)
self.assertEqual(get_users_with_perms(self.reverse_mixed, attach_perms=True),
{
self.joe: ['add_reversemixed', 'change_reversemixed'],
jane: ['change_reversemixed'],
})
result = get_objects_for_user(self.joe, 'testapp.add_reversemixed')
self.assertEqual(sorted(p.pk for p in result),
sorted([self.reverse_mixed.pk]))
|
|
import pytest
import datetime
import csv
from io import StringIO
from random import random
import time
from api.base.settings.defaults import API_BASE, DEFAULT_ES_NULL_VALUE
from osf_tests.factories import (
InstitutionFactory,
AuthUserFactory,
)
from osf.metrics import UserInstitutionProjectCounts
from api.base import settings
@pytest.mark.es
@pytest.mark.django_db
class TestInstitutionUserMetricList:
@pytest.fixture()
def institution(self):
return InstitutionFactory()
@pytest.fixture()
def user(self):
user = AuthUserFactory()
user.fullname = user.fullname + ',a'
user.save()
return user
@pytest.fixture()
def user2(self):
return AuthUserFactory()
@pytest.fixture()
def user3(self):
return AuthUserFactory(fullname='Zedd')
@pytest.fixture()
def user4(self):
return AuthUserFactory()
@pytest.fixture()
def admin(self, institution):
user = AuthUserFactory()
group = institution.get_group('institutional_admins')
group.user_set.add(user)
group.save()
return user
@pytest.fixture()
def populate_counts(self, institution, user, user2):
# Old data that shouldn't appear in responses
UserInstitutionProjectCounts.record(
user_id=user._id,
institution_id=institution._id,
department='Biology dept',
public_project_count=4,
private_project_count=4,
timestamp=datetime.date(2019, 6, 4)
).save()
# New data
UserInstitutionProjectCounts.record(
user_id=user._id,
institution_id=institution._id,
department='Biology dept',
public_project_count=6,
private_project_count=5,
).save()
UserInstitutionProjectCounts.record(
user_id=user2._id,
institution_id=institution._id,
department='Psychology dept',
public_project_count=3,
private_project_count=2,
).save()
time.sleep(10)  # give Elasticsearch time to index the new records before the tests query them
@pytest.fixture()
def populate_more_counts(self, institution, user, user2, user3, populate_counts):
# Creates 9 more user records to test pagination with
users = []
for i in range(0, 8):
users.append(AuthUserFactory())
for test_user in users:
UserInstitutionProjectCounts.record(
user_id=test_user._id,
institution_id=institution._id,
department='Psychology dept',
public_project_count=int(10 * random()),
private_project_count=int(10 * random()),
).save()
UserInstitutionProjectCounts.record(
user_id=user3._id,
institution_id=institution._id,
department='Psychology dept',
public_project_count=int(10 * random()),
private_project_count=int(10 * random()),
).save()
time.sleep(10)
@pytest.fixture()
def populate_na_department(self, institution, user4):
UserInstitutionProjectCounts.record(
user_id=user4._id,
institution_id=institution._id,
public_project_count=1,
private_project_count=1,
).save()
time.sleep(10)
@pytest.fixture()
def url(self, institution):
return f'/{API_BASE}institutions/{institution._id}/metrics/users/'
def test_auth(self, app, url, user, admin):
resp = app.get(url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(url, auth=user.auth, expect_errors=True)
assert resp.status_code == 403
resp = app.get(url, auth=admin.auth)
assert resp.status_code == 200
assert resp.json['data'] == []
def test_get(self, app, url, user, user2, admin, institution, populate_counts):
resp = app.get(url, auth=admin.auth)
assert resp.json['data'] == [
{
'id': user._id,
'type': 'institution-users',
'attributes': {
'user_name': user.fullname,
'public_projects': 6,
'private_projects': 5,
'department': 'Biology dept'
},
'relationships': {
'user': {
'links': {
'related': {
'href': f'http://localhost:8000/v2/users/{user._id}/',
'meta': {}
}
},
'data': {
'id': user._id,
'type': 'users'
}
}
},
'links': {
'self': f'http://localhost:8000/v2/institutions/{institution._id}/metrics/users/'
}
},
{
'id': user2._id,
'type': 'institution-users',
'attributes': {
'user_name': user2.fullname,
'public_projects': 3,
'private_projects': 2,
'department': 'Psychology dept'
},
'relationships': {
'user': {
'links': {
'related': {
'href': f'http://localhost:8000/v2/users/{user2._id}/',
'meta': {}
}
},
'data': {
'id': user2._id,
'type': 'users'
}
}
},
'links': {
'self': f'http://localhost:8000/v2/institutions/{institution._id}/metrics/users/'
}
}
]
# Tests CSV Export
headers = {
'accept': 'text/csv'
}
resp = app.get(url, auth=admin.auth, headers=headers)
assert resp.status_code == 200
assert resp.headers['Content-Type'] == 'text/csv; charset=utf-8'
response_body = resp.text
expected_response = [['id', 'user_name', 'public_projects', 'private_projects', 'type'],
[user._id, user.fullname, '6', '5', 'institution-users'],
[user2._id, user2.fullname, '3', '2', 'institution-users']]
with StringIO(response_body) as csv_file:
csvreader = csv.reader(csv_file, delimiter=',')
for index, row in enumerate(csvreader):
assert row == expected_response[index]
def test_filter(self, app, url, admin, populate_counts):
resp = app.get(f'{url}?filter[department]=Psychology dept', auth=admin.auth)
assert resp.json['data'][0]['attributes']['department'] == 'Psychology dept'
@pytest.mark.skipif(settings.TRAVIS_ENV, reason='Non-deterministic fails on travis')
def test_sort_and_pagination(self, app, url, user, user2, user3, admin, populate_counts, populate_more_counts, institution):
resp = app.get(f'{url}?sort=user_name&page[size]=1&page=2', auth=admin.auth)
assert resp.status_code == 200
assert resp.json['links']['meta']['total'] == 11
resp = app.get(f'{url}?sort=user_name&page[size]=1&page=11', auth=admin.auth)
assert resp.json['data'][0]['attributes']['user_name'] == 'Zedd'
resp = app.get(f'{url}?sort=user_name&page=2', auth=admin.auth)
assert resp.json['links']['meta']['total'] == 11
assert resp.json['data'][-1]['attributes']['user_name'] == 'Zedd'
@pytest.mark.skipif(settings.TRAVIS_ENV, reason='Non-deterministic fails on travis')
def test_filter_and_pagination(self, app, user, user2, user3, url, admin, populate_counts, populate_more_counts, institution):
resp = app.get(f'{url}?page=2', auth=admin.auth)
assert resp.json['links']['meta']['total'] == 11
assert resp.json['data'][0]['attributes']['user_name'] == 'Zedd'
resp = app.get(f'{url}?filter[user_name]=Zedd', auth=admin.auth)
assert resp.json['links']['meta']['total'] == 1
assert resp.json['data'][0]['attributes']['user_name'] == 'Zedd'
@pytest.mark.skipif(settings.TRAVIS_ENV, reason='Non-deterministic fails on travis')
def test_filter_and_sort(self, app, url, user, user2, user3, admin, user4, populate_counts, populate_na_department, institution):
"""
Testing for bug where sorting and filtering would throw 502.
:param app:
:param url:
:param admin:
:param populate_more_counts:
:return:
"""
resp = app.get(f'{url}?page=1&page[size]=10&filter[department]={DEFAULT_ES_NULL_VALUE}&sort=user_name', auth=admin.auth)
assert resp.status_code == 200
data = resp.json['data']
assert len(data) == 1
assert resp.json['links']['meta']['total'] == 1
assert data[0]['id'] == user4._id
resp = app.get(f'{url}?page=1&page[size]=10&sort=department', auth=admin.auth)
assert resp.status_code == 200
data = resp.json['data']
assert len(data) == 3
assert resp.json['links']['meta']['total'] == 3
assert data[0]['attributes']['department'] == 'Biology dept'
assert data[1]['attributes']['department'] == 'N/A'
assert data[2]['attributes']['department'] == 'Psychology dept'
|
|
# Copyright 2012,2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates a spanning tree.
This component uses the discovery component to build a view of the network
topology, constructs a spanning tree, and then disables flooding on switch
ports that aren't on the tree by setting their NO_FLOOD bit. The result
is that topologies with loops no longer turn your network into useless
hot packet soup.
This component is inspired by and roughly based on the description of
Glenn Gibb's spanning tree module for NOX:
http://www.openflow.org/wk/index.php/Basic_Spanning_Tree
Note that this does not have much of a relationship to Spanning Tree
Protocol. They have similar purposes, but this is a rather different way
of going about it.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from collections import defaultdict
from pox.openflow.discovery import Discovery
from pox.lib.util import dpidToStr
from pox.lib.recoco import Timer
import time
import networkx as nx
import itertools
import pox.lib.packet as pkt
log = core.getLogger()
all_switches_set = set()
node_to_be_down = {}
# Might be nice if we made this accessible on core...
# _adj = defaultdict(lambda:defaultdict(lambda:[]))
def _calc_spanning_tree():
"""
Calculates the actual spanning tree
Returns it as dictionary where the keys are DPID1, and the
values are tuples of (DPID2, port-num), where port-num
is the port on DPID1 connecting to DPID2.
"""
def flip(link):
return Discovery.Link(link.dpid2, link.port2, link.dpid1, link.port1, link.link_type,link.available)
adj = defaultdict(lambda: defaultdict(lambda: []))
switches = set()
# Add all links and switches
for l in generator_for_link('lldp'):
adj[l.dpid1][l.dpid2].append(l)
switches.add(l.dpid1)
switches.add(l.dpid2)
# Cull links -- we want a single symmetric link connecting nodes
for s1 in switches:
for s2 in switches:
if s2 not in adj[s1]:
continue
if not isinstance(adj[s1][s2], list):
continue
assert s1 is not s2
good = False
for l in adj[s1][s2]:
if flip(l) in core.openflow_discovery.adjacency:
# This is a good one
adj[s1][s2] = l.port1
adj[s2][s1] = l.port2
good = True
break
if not good:
del adj[s1][s2]
if s1 in adj[s2]:
# Delete the other way too
del adj[s2][s1]
q = []
more = set(switches)
done = set()
tree = defaultdict(set)
while True:
q = sorted(list(more)) + q
more.clear()
if len(q) == 0: break
v = q.pop(False)
if v in done: continue
done.add(v)
for w, p in adj[v].iteritems():
if w in tree: continue
more.add(w)
tree[v].add((w, p))
tree[w].add((v, adj[w][v]))
if False:
log.debug("*** SPANNING TREE ***")
for sw, ports in tree.iteritems():
# print " ", dpidToStr(sw), ":", sorted(list(ports))
# print " ", sw, ":", [l[0] for l in sorted(list(ports))]
log.debug((" %i : " % sw) + " ".join([str(l[0]) for l in
sorted(list(ports))]))
log.debug("*********************")
return tree
# Keep a list of previous port states so that we can skip some port mods
# If other things mess with port states, these may not be correct. We
# could also refer to Connection.ports, but those are not guaranteed to
# be up to date.
_prev = defaultdict(lambda: defaultdict(lambda: None))
# If True, we set ports down when a switch connects
_noflood_by_default = False
# If True, don't allow turning off flood bits until a complete discovery
# cycle should have completed (mostly makes sense with _noflood_by_default).
_hold_down = False
def _handle_ConnectionUp(event):
# When a switch connects, forget about previous port states
_prev[event.dpid].clear()
if _noflood_by_default:
con = event.connection
log.debug("Disabling flooding for %i ports", len(con.ports))
for p in con.ports.itervalues():
if p.port_no >= of.OFPP_MAX: continue
_prev[con.dpid][p.port_no] = False
pm = of.ofp_port_mod(port_no=p.port_no,
hw_addr=p.hw_addr,
config=of.OFPPC_NO_FLOOD,
mask=of.OFPPC_NO_FLOOD)
con.send(pm)
_invalidate_ports(con.dpid)
if _hold_down:
t = Timer(core.openflow_discovery.send_cycle_time + 1, _update_tree,
kw={'force_dpid': event.dpid})
def _handle_LinkEvent(event):
if event.link.link_type == 'lldp':
# When links change, update spanning tree
(dp1, p1), (dp2, p2) = event.link.end
if _prev[dp1][p1] is False:
if _prev[dp2][p2] is False:
# We're disabling this link; who cares if it's up or down?
# log.debug("Ignoring link status for %s", event.link)
return
_update_tree()
elif event.link.link_type == 'broadcast':
update_sw_cloud_site_domain()
def _update_tree(force_dpid=None):
"""
Update spanning tree
force_dpid specifies a switch we want to update even if we are supposed
to be holding down changes.
"""
# Get a spanning tree
tree = _calc_spanning_tree()
log.debug("Spanning tree updated")
# Connections born before this time are old enough that a complete
# discovery cycle should have completed (and, thus, all of their
# links should have been discovered).
enable_time = time.time() - core.openflow_discovery.send_cycle_time - 1
# Now modify ports as needed
try:
change_count = 0
for sw, ports in tree.iteritems():
con = core.openflow.getConnection(sw)
if con is None: continue # Must have disconnected
if con.connect_time is None: continue # Not fully connected
if _hold_down:
if con.connect_time > enable_time:
# Too young -- we should hold down changes.
if force_dpid is not None and sw == force_dpid:
# .. but we'll allow it anyway
pass
else:
continue
tree_ports = [p[1] for p in ports]
for p in con.ports.itervalues():
if p.port_no < of.OFPP_MAX:
flood = p.port_no in tree_ports
if not flood:
if core.openflow_discovery.is_edge_port(sw, p.port_no) or \
core.openflow_discovery._is_broadcast_port(sw,p.port_no):
flood = True
if _prev[sw][p.port_no] is flood:
# print sw,p.port_no,"skip","(",flood,")"
continue # Skip
change_count += 1
_prev[sw][p.port_no] = flood
# print sw,p.port_no,flood
# TODO: Check results
pm = of.ofp_port_mod(port_no=p.port_no,
hw_addr=p.hw_addr,
config=0 if flood else of.OFPPC_NO_FLOOD,
mask=of.OFPPC_NO_FLOOD)
con.send(pm)
_invalidate_ports(con.dpid)
if change_count:
log.info("%i ports changed", change_count)
except:
_prev.clear()
log.exception("Couldn't push spanning tree")
def _check_path(dpid1, dpid2):
if dpid1 == dpid2:
return True
else:
g = _graph_for_link('lldp')
if all(i in g.nodes() for i in [dpid1, dpid2]):
return nx.has_path(g, dpid1, dpid2)
log.info('not all nodes in g')
return False
def _get_openflow_domain():
g = _graph_for_link('lldp')
domain_sw_dpid_set = set()
sw_lldp_set = set()
of_domain_set = set()
for x in nx.connected_components(g):
sw_lldp_set.update(x)
domain_sw_dpid_set.add(frozenset(x))
for x in all_switches_set.difference(sw_lldp_set):
domain_sw_dpid_set.add(frozenset([x]))
for dpid_set in domain_sw_dpid_set:
of_domain = Of_domain(dpid_set)
of_domain_set.add(of_domain)
return of_domain_set
def _clear_flow_for_all_sw():
all_switches_set.clear()
for link in generator_for_link():
all_switches_set.update([link.dpid1, link.dpid2])
for sw in all_switches_set:
clear = of.ofp_flow_mod(command=of.OFPFC_DELETE)
con = core.openflow.getConnection(sw)
if con is None: continue
con.send(clear)
def _clear_broadcast_link_availability():
for link in generator_for_link('broadcast'):
link.available = True
def update_sw_cloud_site_domain():
clouds_set, sites_set, switches_set = _set_switches_clouds_sites()
_clear_flow_for_all_sw()
_clear_broadcast_link_availability()
_set_port_status_for_every_site(switches_set)
of_domain_sets = _get_openflow_domain()
for domain in of_domain_sets:
for site in sites_set:
if site.sw_dpid_set.issubset(domain.sw_dpid_set):
site.of_domain = domain
domain.sites.add(site)
continue
sites_to_be_down = form_big_spanning_tree(clouds_set)
if sites_to_be_down:
switches_to_be_down = map(lambda x: x.switches[0], sites_to_be_down)
global node_to_be_down
node_to_be_down.clear()
for x in switches_to_be_down:
node_to_be_down[x.cloud.get_active_switches_for_cloud()] = x.dpid,x.port_number
# map(lambda x: test[x.cloud.get_active_switches_for_cloud()] = (x.dpid,x.port_number),switches_to_be_down)
_send_sw_flows_no_floods(switches_to_be_down)
send_lldp_broadcast_drop_flow_for_switches(switches_to_be_down)
def form_big_spanning_tree(clouds):
original_g = nx.Graph()
if not clouds: return None
for cloud in clouds:
if not cloud.sites: return None
for site in cloud.sites:
if not site.switches: return None
original_g.add_edge(cloud, site.of_domain, weight=site.switches[0].dpid, site=site)
spt = nx.minimum_spanning_tree(original_g)
spt_att = nx.get_edge_attributes(spt, 'site')
original_g_att = nx.get_edge_attributes(original_g, 'site')
return set(original_g_att.itervalues()) - set(spt_att.itervalues())
def _set_switches_clouds_sites():
clouds_set = set()
switches_set = set()
sites_set = set()
cloud_g = nx.Graph()
for link in generator_for_link('broadcast'):
cloud_g.add_edge(Switch(link.dpid1, link.port1), Switch(link.dpid2, link.port2))
for clique in nx.find_cliques(cloud_g):
cloud = Cloud()
clouds_set.add(cloud)
for sw in clique:
sw.cloud = cloud
cloud.switches.add(sw)
switches_set.add(sw)
for cloud in clouds_set:
site_g = nx.Graph()
for i in itertools.combinations_with_replacement(cloud.switches, 2):
if _check_path(i[0].dpid, i[1].dpid):
site_g.add_edge(i[0], i[1])
for sw_in_site in nx.connected_components(site_g):
sw_in_site.sort(key=lambda switch: switch.dpid)
sw_in_site[0].active = True
site = Site()
sites_set.add(site)
site.cloud = cloud
cloud.sites.add(site)
for sw in sw_in_site:
sw.site = site
site.add_switch(sw)
return clouds_set, sites_set, switches_set
def _send_sw_flows_no_floods(switches_set):
for sw in switches_set:
con = core.openflow.getConnection(sw.dpid)
if con is None:
log.debug("There isn't any connection between %s and the controller", sw.dpid)
continue
if con.connect_time is None:
log.debug('Not fully connected')
continue
pm = of.ofp_port_mod(port_no=sw.port_number, hw_addr=dpid_port_to_mac(sw.dpid, sw.port_number, con),
config=of.OFPPC_NO_FLOOD, mask=of.OFPPC_NO_FLOOD)
con.send(pm)
def _set_port_status_for_every_site(switches_set):
for switch in switches_set:
con = core.openflow.getConnection(switch.dpid)
if con is None:
log.debug("There isn't any connection between %s and the controller", switch.dpid)
continue
if con.connect_time is None:
log.debug('Not fully connected')
continue
if switch.active is False:
send_lldp_broadcast_drop_flow(switch, con)
_tag_broadcast_link(switch.dpid, switch.port_number)
pm = of.ofp_port_mod(port_no=switch.port_number, hw_addr=dpid_port_to_mac(switch.dpid, switch.port_number, con),
config=0 if switch.active else of.OFPPC_NO_FLOOD, mask=of.OFPPC_NO_FLOOD)
con.send(pm)
def send_lldp_broadcast_drop_flow_for_switches(switches):
for sw in switches:
con = core.openflow.getConnection(sw.dpid)
if con is None or con.connect_time is None: continue
send_lldp_broadcast_drop_flow(sw,con)
def send_lldp_broadcast_drop_flow(switch, con):
if con is None or con.connect_time is None: return
match_rule = defaultdict()
match_rule['lldp'] = of.ofp_match(in_port=switch.port_number, dl_dst=pkt.ETHERNET.LLDP_MULTICAST,
dl_type=pkt.ethernet.LLDP_TYPE)
match_rule['broadcast'] = of.ofp_match(in_port=switch.port_number, dl_dst=pkt.ETHERNET.ETHER_BROADCAST,
dl_type=pkt.ethernet.LLDP_TYPE)
match_rule['any'] = of.ofp_match(in_port=switch.port_number)
for key, match in match_rule.iteritems():
add_flow_msg = of.ofp_flow_mod(match=match)
if key == 'any':
add_flow_msg.priority = of.OFP_DEFAULT_PRIORITY
# add_flow_msg.actions.append(of.ofp_action_output(port=of.OFPP_NONE))
else:
add_flow_msg.priority = of.OFP_HIGH_PRIORITY
add_flow_msg.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
con.send(add_flow_msg)
def dpid_port_to_mac(dpid, port_number, con=None):
if con is None:
con = core.openflow.getConnection(dpid)
if con is None or con.connect_time is None:
return None
for port in con.original_ports._ports:
if port_number == port.port_no:
return port.hw_addr
def generator_for_link(link_type=None):
if link_type is None:
return (l for l in core.openflow_discovery.adjacency)
elif link_type != 'lldp' and link_type != 'broadcast':
log.debug('type is not correct')
return None
else:
return (l for l in core.openflow_discovery.adjacency if l.link_type == link_type)
_dirty_switches = {} # A map dpid_with_dirty_ports->Timer
_coalesce_period = 2 # Seconds to wait between features requests
def _tag_broadcast_link(dpid,port_number):
for link in generator_for_link('broadcast'):
if ((dpid,port_number) == (link.dpid1,link.port1)) or ((dpid,port_number) == (link.dpid2, link.port2)):
link.available = False
def _invalidate_ports(dpid):
"""
Registers the fact that port info for dpid may be out of date
When the spanning tree adjusts the port flags, the port config bits
we keep in the Connection become out of date. We don't want to just
set them locally because an in-flight port status message could
overwrite them. We also might not want to assume they get set the
way we want them. SO, we do send a features request, but we wait a
moment before sending it so that we can potentially coalesce several.
TLDR: Port information for this switch may be out of date for around
_coalesce_period seconds.
"""
if dpid in _dirty_switches:
# We're already planning to check
return
t = Timer(_coalesce_period, _check_ports, args=(dpid,))
_dirty_switches[dpid] = t
def _check_ports(dpid):
"""
Sends a features request to the given dpid
"""
_dirty_switches.pop(dpid, None)
con = core.openflow.getConnection(dpid)
if con is None: return
con.send(of.ofp_barrier_request())
con.send(of.ofp_features_request())
log.debug("Requested switch features for %s", str(con))
class Switch(object):
"""docstring for switch"""
def __init__(self, dpid=0, port_number=0, cloud=None, site=None, active=False):
super(Switch, self).__init__()
self.dpid = dpid
self.port_number = port_number
self.cloud = cloud
self.site = site
self.active = active
def __str__(self):
return 'dpid is %s, port_number is %s, the cloud is %s, the site is %s' % \
(self.dpid, self.port_number, self.cloud, self.site)
def __repr__(self):
return 'dpid is %s, port_number is %s, the cloud is %s, the site is %s' % \
(self.dpid, self.port_number, self.cloud, self.site)
def __hash__(self):
return self.dpid + self.port_number
def __eq__(self, other):
return self.dpid == other.dpid and self.port_number == other.port_number
class Cloud(object):
def __init__(self):
super(Cloud, self).__init__()
self.switches = set()
self.sites = set()
def __repr__(self):
return 'cloud ' + str([sw.dpid for sw in self.switches])
def __str__(self):
return 'cloud ' + str([sw.dpid for sw in self.switches])
def get_active_switches_for_cloud(self):
switches = filter(lambda x: x.active is True, self.switches)
return frozenset(map(lambda x: (x.dpid, x.port_number), switches))
class Site(object):
def __init__(self):
super(Site, self).__init__()
self.switches = []
self.cloud = None
self.sw_dpid_set = set()
self.of_domain = None
def add_switch(self, sw):
self.switches.append(sw)
self.sw_dpid_in_site()
def sw_dpid_in_site(self):
map(lambda sw: self.sw_dpid_set.add(sw.dpid), self.switches)
return self.sw_dpid_set
def __repr__(self):
return 'site ' + str([sw.dpid for sw in self.switches])
def __str__(self):
return 'site ' + str([sw.dpid for sw in self.switches])
class Of_domain(object):
def __init__(self, dpid_set=None):
super(Of_domain, self).__init__()
self.sw_dpid_set = dpid_set
self.sites = set()
def __repr__(self):
return 'of_domain ' + str(self.sw_dpid_set)
def __str__(self):
return 'of_domain ' + str(self.sw_dpid_set)
def _graph_for_link(link_type):
g = nx.Graph()
for link in generator_for_link(link_type):
g.add_edge(link.dpid1, link.dpid2)
return g
def launch(no_flood=False, hold_down=False):
global _noflood_by_default, _hold_down
if no_flood is True:
_noflood_by_default = True
if hold_down is True:
_hold_down = True
def start_spanning_tree():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent, priority=100)
log.debug("Spanning tree component ready")
core.call_when_ready(start_spanning_tree, "openflow_discovery")
|
|
import os
import pytest
import pip.baseparser
from pip import main
from pip import cmdoptions
from pip.basecommand import Command
from pip.commands import commands
class FakeCommand(Command):
name = 'fake'
summary = name
def main(self, args):
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.add_option_group(index_opts)
return self.parse_args(args)
class TestOptionPrecedence(object):
"""
Tests for confirming our option precedence:
cli -> environment -> subcommand config -> global config -> option
defaults
"""
def setup(self):
self.environ_before = os.environ.copy()
commands[FakeCommand.name] = FakeCommand
def teardown(self):
os.environ = self.environ_before
commands.pop(FakeCommand.name)
def get_config_section(self, section):
config = {
'global': [('timeout', '-3')],
'fake': [('timeout', '-2')],
}
return config[section]
def get_config_section_global(self, section):
config = {
'global': [('timeout', '-3')],
'fake': [],
}
return config[section]
def test_env_override_default_int(self):
"""
Test that environment variable overrides an int option default.
"""
os.environ['PIP_TIMEOUT'] = '-1'
options, args = main(['fake'])
assert options.timeout == -1
def test_env_override_default_append(self):
"""
Test that environment variable overrides an append option default.
"""
os.environ['PIP_FIND_LINKS'] = 'F1'
options, args = main(['fake'])
assert options.find_links == ['F1']
os.environ['PIP_FIND_LINKS'] = 'F1 F2'
options, args = main(['fake'])
assert options.find_links == ['F1', 'F2']
def test_env_override_default_choice(self):
"""
Test that environment variable overrides a choice option default.
"""
os.environ['PIP_EXISTS_ACTION'] = 'w'
options, args = main(['fake'])
assert options.exists_action == ['w']
os.environ['PIP_EXISTS_ACTION'] = 's w'
options, args = main(['fake'])
assert options.exists_action == ['s', 'w']
def test_env_alias_override_default(self):
"""
When an option has multiple long forms, test that the technique of
using the env variable, "PIP_<long form>" works for all cases.
(e.g. PIP_LOG_FILE and PIP_LOCAL_LOG should all work)
"""
os.environ['PIP_LOG_FILE'] = 'override.log'
options, args = main(['fake'])
assert options.log_file == 'override.log'
os.environ['PIP_LOCAL_LOG'] = 'override.log'
options, args = main(['fake'])
assert options.log_file == 'override.log'
def test_cli_override_environment(self):
"""
Test that the cli overrides an environment variable
"""
os.environ['PIP_TIMEOUT'] = '-1'
options, args = main(['fake', '--timeout', '-2'])
assert options.timeout == -2
def test_environment_override_config(self, monkeypatch):
"""
Test that an environment variable overrides the config file
"""
monkeypatch.setattr(
pip.baseparser.ConfigOptionParser,
"get_config_section",
self.get_config_section,
)
os.environ['PIP_TIMEOUT'] = '-1'
options, args = main(['fake'])
assert options.timeout == -1
def test_command_config_override_global_config(self, monkeypatch):
"""
Test that command config overrides global config
"""
monkeypatch.setattr(
pip.baseparser.ConfigOptionParser,
"get_config_section",
self.get_config_section,
)
options, args = main(['fake'])
assert options.timeout == -2
def test_global_config_is_used(self, monkeypatch):
"""
Test that global config is used
"""
monkeypatch.setattr(
pip.baseparser.ConfigOptionParser,
"get_config_section",
self.get_config_section_global,
)
options, args = main(['fake'])
assert options.timeout == -3
class TestOptionsInterspersed(object):
def setup(self):
self.environ_before = os.environ.copy()
commands[FakeCommand.name] = FakeCommand
def teardown(self):
os.environ = self.environ_before
commands.pop(FakeCommand.name)
def test_general_option_after_subcommand(self):
options, args = main(['fake', '--timeout', '-1'])
assert options.timeout == -1
def test_option_after_subcommand_arg(self):
options, args = main(['fake', 'arg', '--timeout', '-1'])
assert options.timeout == -1
def test_additive_before_after_subcommand(self):
options, args = main(['-v', 'fake', '-v'])
assert options.verbose == 2
def test_subcommand_option_before_subcommand_fails(self):
with pytest.raises(SystemExit):
main(['--find-links', 'F1', 'fake'])
class TestGeneralOptions(object):
# the reason to specifically test general options is due to the
# extra processing they receive, and the number of bugs we've had
def setup(self):
self.environ_before = os.environ.copy()
commands[FakeCommand.name] = FakeCommand
def teardown(self):
os.environ = self.environ_before
commands.pop(FakeCommand.name)
def test_require_virtualenv(self):
options1, args1 = main(['--require-virtualenv', 'fake'])
options2, args2 = main(['fake', '--require-virtualenv'])
assert options1.require_venv
assert options2.require_venv
def test_verbose(self):
options1, args1 = main(['--verbose', 'fake'])
options2, args2 = main(['fake', '--verbose'])
assert options1.verbose == options2.verbose == 1
def test_quiet(self):
options1, args1 = main(['--quiet', 'fake'])
options2, args2 = main(['fake', '--quiet'])
assert options1.quiet == options2.quiet == 1
def test_log(self):
options1, args1 = main(['--log', 'path', 'fake'])
options2, args2 = main(['fake', '--log', 'path'])
assert options1.log == options2.log == 'path'
def test_log_explicit_levels(self):
options1, args1 = main(['--log-explicit-levels', 'fake'])
options2, args2 = main(['fake', '--log-explicit-levels'])
assert options1.log_explicit_levels
assert options2.log_explicit_levels
def test_local_log(self):
options1, args1 = main(['--local-log', 'path', 'fake'])
options2, args2 = main(['fake', '--local-log', 'path'])
assert options1.log_file == options2.log_file == 'path'
def test_no_input(self):
options1, args1 = main(['--no-input', 'fake'])
options2, args2 = main(['fake', '--no-input'])
assert options1.no_input
assert options2.no_input
def test_proxy(self):
options1, args1 = main(['--proxy', 'path', 'fake'])
options2, args2 = main(['fake', '--proxy', 'path'])
assert options1.proxy == options2.proxy == 'path'
def test_retries(self):
options1, args1 = main(['--retries', '-1', 'fake'])
options2, args2 = main(['fake', '--retries', '-1'])
assert options1.retries == options2.retries == -1
def test_timeout(self):
options1, args1 = main(['--timeout', '-1', 'fake'])
options2, args2 = main(['fake', '--timeout', '-1'])
assert options1.timeout == options2.timeout == -1
def test_default_vcs(self):
options1, args1 = main(['--default-vcs', 'path', 'fake'])
options2, args2 = main(['fake', '--default-vcs', 'path'])
assert options1.default_vcs == options2.default_vcs == 'path'
def test_skip_requirements_regex(self):
options1, args1 = main(['--skip-requirements-regex', 'path', 'fake'])
options2, args2 = main(['fake', '--skip-requirements-regex', 'path'])
assert options1.skip_requirements_regex == 'path'
assert options2.skip_requirements_regex == 'path'
def test_exists_action(self):
options1, args1 = main(['--exists-action', 'w', 'fake'])
options2, args2 = main(['fake', '--exists-action', 'w'])
assert options1.exists_action == options2.exists_action == ['w']
def test_cert(self):
options1, args1 = main(['--cert', 'path', 'fake'])
options2, args2 = main(['fake', '--cert', 'path'])
assert options1.cert == options2.cert == 'path'
def test_client_cert(self):
options1, args1 = main(['--client-cert', 'path', 'fake'])
options2, args2 = main(['fake', '--client-cert', 'path'])
assert options1.client_cert == options2.client_cert == 'path'
class TestOptionsConfigFiles(object):
def test_venv_config_file_found(self, monkeypatch):
# We only want a dummy object to call the get_config_files method
monkeypatch.setattr(
pip.baseparser.ConfigOptionParser,
'__init__',
lambda self: None,
)
# If we are running in a virtualenv and all files appear to exist,
# we should see two config files.
monkeypatch.setattr(
pip.baseparser,
'running_under_virtualenv',
lambda: True,
)
monkeypatch.setattr(os.path, 'exists', lambda filename: True)
cp = pip.baseparser.ConfigOptionParser()
assert len(cp.get_config_files()) == 2
|
|
# Client for the DICT protocol (RFC2229)
#
# Copyright (C) 2002 John Goerzen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import socket, re
version = '1.0'
def dequote(str):
"""Will remove single or double quotes from the start and end of a string
and return the result."""
quotechars = "'\""
while len(str) and str[0] in quotechars:
str = str[1:]
while len(str) and str[-1] in quotechars:
str = str[0:-1]
return str
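# Illustrative behaviour (doctest-style, not part of the original module):
# >>> dequote('"word"')
# 'word'
# >>> dequote("'already bare")
# 'already bare'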
def enquote(str):
"""This function will put a string in double quotes, properly
escaping any existing double quotes with a backslash. It will
return the result."""
return '"' + str.replace('"', "\\\"") + '"'
class Connection:
"""This class is used to establish a connection to a database server.
You will usually use this as the first call into the dictclient library.
Instantiating it takes two optional arguments: a hostname (a string)
and a port (an int). The hostname defaults to localhost
and the port to 2628, the port specified in RFC."""
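# A minimal usage sketch (assumes a DICT server is reachable on localhost:2628):
#
#   conn = Connection()                    # or Connection('dict.org', 2628)
#   print(conn.getdbdescs())               # available databases
#   for d in conn.define('!', 'hello'):    # '!' = first database with a match
#       print(d.getword(), d.getdefstr())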
def __init__(self, hostname = 'localhost', port = 2628):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((hostname, port))
self.rfile = self.sock.makefile("rb")
self.wfile = self.sock.makefile("wb", 0)
self.saveconnectioninfo()
def getresultcode(self):
"""Generic function to get a result code. It will return a list
consisting of two items: the integer result code and the text
following. You will not usually use this function directly."""
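# For example, a status line such as "220 dict.example.org dictd <mime> <1@host>"
# (hypothetical banner text) would be returned as
# [220, 'dict.example.org dictd <mime> <1@host>'].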
line = self.rfile.readline().decode('utf8').strip()
if line.startswith('['):
return [None, line]
code, text = line.split(' ', 1)
return [int(code), text]
def get200result(self):
"""Used when expecting a single line of text -- a 200-class
result. Returns [intcode, remaindertext]"""
code, text = self.getresultcode()
if code < 200 or code >= 300:
raise Exception("Got '%s' when 200-class response expected" % \
line)
return [code, text]
def get100block(self):
"""Used when expecting multiple lines of text -- gets the block
part only. Does not get any codes or anything! Returns a string."""
data = []
while True:
line = self.rfile.readline().decode('utf8').strip()
if line == '.':
break
data.append(line)
return "\n".join(data)
def get100result(self):
"""Used when expecting multiple lines of text, terminated by a period
and a 200 code. Returns: [initialcode, [bodytext_1lineperentry],
finalcode]"""
code, text = self.getresultcode()
if code < 100 or code >= 200:
raise Exception("Got '%s' when 100-class response expected" % \
code)
bodylines = self.get100block().split("\n")
code2 = self.get200result()[0]
return [code, bodylines, code2]
def get100dict(self):
"""Used when expecting a dictionary of results. Will read from
the initial 100 code, to a period and the 200 code."""
dict = {}
for line in self.get100result()[1]:
key, val = line.split(' ', 1)
dict[key] = dequote(val)
return dict
def saveconnectioninfo(self):
"""Called by __init__ to handle the initial connection. Will
save off the capabilities and messageid."""
code, string = self.get200result()
assert code == 220
capstr, msgid = re.search('<(.*)> (<.*>)$', string).groups()
self.capabilities = capstr.split('.')
self.messageid = msgid
def getcapabilities(self):
"""Returns a list of the capabilities advertised by the server."""
return self.capabilities
def getmessageid(self):
"""Returns the message id, including angle brackets."""
return self.messageid
def getdbdescs(self):
"""Gets a dict of available databases. The key is the db name
and the value is the db description. This command may generate
network traffic!"""
if hasattr(self, 'dbdescs'):
return self.dbdescs
self.sendcommand("SHOW DB")
self.dbdescs = self.get100dict()
return self.dbdescs
def getstratdescs(self):
"""Gets a dict of available strategies. The key is the strat
name and the value is the strat description. This call may
generate network traffic!"""
if hasattr(self, 'stratdescs'):
return self.stratdescs
self.sendcommand("SHOW STRAT")
self.stratdescs = self.get100dict()
return self.stratdescs
def getdbobj(self, dbname):
"""Gets a Database object corresponding to the database name passed
in. This function explicitly will *not* generate network traffic.
If you have not yet run getdbdescs(), it will fail."""
if not hasattr(self, 'dbobjs'):
self.dbobjs = {}
if dbname in self.dbobjs:
return self.dbobjs[dbname]
# We use self.dbdescs explicitly since we don't want to
# generate net traffic with this request!
if dbname != '*' and dbname != '!' and \
not dbname in self.dbdescs.keys():
raise Exception("Invalid database name '%s'" % dbname)
self.dbobjs[dbname] = Database(self, dbname)
return self.dbobjs[dbname]
def sendcommand(self, command):
"""Takes a command, without a newline character, and sends it to
the server."""
self.wfile.write(command.encode('utf-8') + b"\n")
def define(self, database, word):
"""Returns a list of Definition objects for each matching
definition. Parameters are the database name and the word
to look up. This is one of the main functions you will use
to interact with the server. Returns a list of Definition
objects. If there are no matches, an empty list is returned.
Note: database may be '*' which means to search all databases,
or '!' which means to return matches from the first database that
has a match."""
self.getdbdescs() # Prime the cache
if database != '*' and database != '!' and \
not database in self.getdbdescs():
raise Exception("Invalid database '%s' specified" % database)
self.sendcommand("DEFINE " + enquote(database) + " " + enquote(word))
code = self.getresultcode()[0]
retval = []
if code == 552:
# No definitions.
return []
if code != 150:
raise Exception("Unknown code %d" % code)
while True:
code, text = self.getresultcode()
if code is None or code != 151:
break
resultword, resultdb = re.search(r'^"(.+)" (\S+)', text).groups()
defstr = self.get100block()
retval.append(Definition(self, self.getdbobj(resultdb),
resultword, defstr))
return retval
def match(self, database, strategy, word):
"""Gets matches for a query. Arguments are database name,
the strategy (see available ones in getstratdescs()), and the
pattern/word to look for. Returns a list of Definition objects.
If there is no match, an empty list is returned.
Note: database may be '*' which means to search all databases,
or '!' which means to return matches from the first database that
has a match."""
self.getstratdescs() # Prime the cache
self.getdbdescs() # Prime the cache
if not strategy in self.getstratdescs().keys():
raise Exception("Invalid strategy '%s'" % strategy)
if database != '*' and database != '!' and \
not database in self.getdbdescs().keys():
raise Exception("Invalid database name '%s'" % database)
self.sendcommand("MATCH %s %s %s" % (enquote(database),
enquote(strategy),
enquote(word)))
code = self.getresultcode()[0]
if code == 552:
# No Matches
return []
if code != 152:
raise Exception("Unexpected code %d" % code)
retval = []
for matchline in self.get100block().split("\n"):
matchdict, matchword = matchline.split(" ", 1)
retval.append(Definition(self, self.getdbobj(matchdict),
dequote(matchword)))
final_code = self.getresultcode()[0]
if final_code != 250:
raise Exception("Unexpected end-of-list code %d" % final_code)
return retval
class Database:
"""An object corresponding to a particular database in a server."""
def __init__(self, dictconn, dbname):
"""Initialize the object -- requires a Connection object and
a database name."""
self.conn = dictconn
self.name = dbname
def getname(self):
"""Returns the short name for this database."""
return self.name
def getdescription(self):
if hasattr(self, 'description'):
return self.description
if self.getname() == '*':
self.description = 'All Databases'
elif self.getname() == '!':
self.description = 'First matching database'
else:
self.description = self.conn.getdbdescs()[self.getname()]
return self.description
def getinfo(self):
"""Returns a string of info describing this database."""
if hasattr(self, 'info'):
return self.info
if self.getname() == '*':
self.info = "This special database will search all databases on the system."
elif self.getname() == '!':
self.info = "This special database will return matches from the first matching database."
else:
self.conn.sendcommand("SHOW INFO " + self.name)
self.info = "\n".join(self.conn.get100result()[1])
return self.info
def define(self, word):
"""Get a definition from within this database.
The argument, word, is the word to look up. The return value is the
same as from Connection.define()."""
return self.conn.define(self.getname(), word)
def match(self, strategy, word):
"""Get a match from within this database.
The argument, word, is the word to look up. The return value is
the same as from Connection.define()."""
return self.conn.match(self.getname(), strategy, word)
class Definition:
"""An object corresponding to a single definition."""
def __init__(self, dictconn, db, word, defstr = None):
"""Instantiate the object. Requires: a Connection object,
a Database object (NOT corresponding to '*' or '!' databases),
a word. Optional: a definition string. If not supplied,
it will be fetched if/when it is requested."""
self.conn = dictconn
self.db = db
self.word = word
self.defstr = defstr
def getdb(self):
"""Get the Database object corresponding to this definition."""
return self.db
def getdefstr(self):
"""Get the definition string (the actual content) of this
definition."""
if not self.defstr:
self.defstr = self.conn.define(self.getdb().getname(), self.word)[0].getdefstr()
return self.defstr
def getword(self):
"""Get the word this object describes."""
return self.word
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HierarchicalController Class.
The HierarchicalController encompasses the entire lifecycle of training the
device placement policy, including generating op embeddings, getting groups for
each op, placing those groups and running the predicted placements.
Different assignment models can inherit from this class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.grappler.controller import Controller
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training_util
class PlacerParams(object):
"""Class to hold a set of placement parameters as name-value pairs.
A typical usage is as follows:
```python
# Create a PlacerParams object specifying names and values of the model
# parameters:
params = PlacerParams(hidden_size=128, decay_steps=50)
# The parameters are available as attributes of the PlacerParams object:
params.hidden_size ==> 128
params.decay_steps ==> 50
```
"""
def __init__(self, **kwargs):
"""Create an instance of `PlacerParams` from keyword arguments.
The keyword arguments specify name-values pairs for the parameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `PlacerParams` object,
and they can be accessed directly with the dot notation `params._name_`.
Example:
```python
# Define 1 parameter: 'hidden_size'
params = PlacerParams(hidden_size=128)
params.hidden_size ==> 128
```
Args:
**kwargs: Key-value pairs where the key is the parameter name and
the value is the value for the parameter.
"""
for name, value in six.iteritems(kwargs):
self.add_param(name, value)
def add_param(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# parameter name.
if getattr(self, name, None) is not None:
raise ValueError("Parameter name is reserved: %s" % name)
setattr(self, name, value)
def hierarchical_controller_hparams():
"""Hyperparameters for hierarchical planner."""
return PlacerParams(
hidden_size=512,
forget_bias_init=1.0,
temperature=1.0,
logits_std_noise=0.5,
stop_noise_step=750,
decay_steps=50,
max_num_outputs=5,
max_output_size=5,
tanh_constant=1.0,
adj_embed_dim=20,
grouping_hidden_size=64,
num_groups=None,
bi_lstm=True,
failing_signal=100,
stop_sampling=500,
start_with_failing_signal=True,
always_update_baseline=False,
bl_dec=0.9,
grad_bound=1.0,
lr=0.1,
lr_dec=0.95,
start_decay_step=400,
optimizer_type="adam",
stop_updating_after_steps=1000,
name="hierarchical_controller",
keep_prob=1.0,
reward_function="sqrt",
seed=1234,
# distributed training params
num_children=1)
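# Illustrative sketch (not part of the original module): building the default
# hyperparameters and tweaking them. The parameter names come from
# hierarchical_controller_hparams() above; the specific overrides shown here
# are hypothetical.
def _example_build_hparams():
  hparams = hierarchical_controller_hparams()
  assert hparams.hidden_size == 512  # defaults are exposed as plain attributes
  hparams.lr = 0.05  # existing values can be overwritten directly
  hparams.add_param("note", "toy run")  # add_param refuses names already in use
  return hparams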
class HierarchicalController(Controller):
"""HierarchicalController class."""
def __init__(self, hparams, item, cluster, controller_id=0):
"""HierarchicalController class initializer.
Args:
hparams: All hyper-parameters.
item: The metagraph to place.
cluster: The cluster of hardware devices to optimize for.
controller_id: the id of the controller in a multi-controller setup.
"""
super(HierarchicalController, self).__init__(item, cluster)
self.ctrl_id = controller_id
self.hparams = hparams
if self.hparams.num_groups is None:
self.num_groups = min(256, 20 * self.num_devices)
else:
self.num_groups = self.hparams.num_groups
# creates self.op_embeddings and self.type_dict
self.create_op_embeddings(verbose=False)
# TODO(azalia) clean up embedding/group_embedding_size names
self.group_emb_size = (
2 * self.num_groups + len(self.type_dict) +
self.hparams.max_num_outputs * self.hparams.max_output_size)
self.embedding_size = self.group_emb_size
self.initializer = init_ops.glorot_uniform_initializer(
seed=self.hparams.seed)
with variable_scope.variable_scope(
self.hparams.name,
initializer=self.initializer,
reuse=variable_scope.AUTO_REUSE):
# define parameters of feedforward
variable_scope.get_variable("w_grouping_ff", [
1 + self.hparams.max_num_outputs * self.hparams.max_output_size +
self.hparams.adj_embed_dim, self.hparams.grouping_hidden_size
])
variable_scope.get_variable(
"w_grouping_softmax",
[self.hparams.grouping_hidden_size, self.num_groups])
if self.hparams.bi_lstm:
variable_scope.get_variable("encoder_lstm_forward", [
self.embedding_size + self.hparams.hidden_size / 2,
2 * self.hparams.hidden_size
])
variable_scope.get_variable("encoder_lstm_backward", [
self.embedding_size + self.hparams.hidden_size / 2,
2 * self.hparams.hidden_size
])
variable_scope.get_variable(
"device_embeddings", [self.num_devices, self.hparams.hidden_size])
variable_scope.get_variable(
"decoder_lstm",
[2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size])
variable_scope.get_variable(
"device_softmax", [2 * self.hparams.hidden_size, self.num_devices])
variable_scope.get_variable("device_go_embedding",
[1, self.hparams.hidden_size])
variable_scope.get_variable(
"encoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"decoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable(
"attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1])
else:
variable_scope.get_variable("encoder_lstm", [
self.embedding_size + self.hparams.hidden_size,
4 * self.hparams.hidden_size
])
variable_scope.get_variable(
"device_embeddings", [self.num_devices, self.hparams.hidden_size])
variable_scope.get_variable(
"decoder_lstm",
[2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size])
variable_scope.get_variable(
"device_softmax", [2 * self.hparams.hidden_size, self.num_devices])
variable_scope.get_variable("device_go_embedding",
[1, self.hparams.hidden_size])
variable_scope.get_variable(
"encoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"decoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable(
"attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1])
seq2seq_input_layer = array_ops.placeholder_with_default(
array_ops.zeros([self.hparams.num_children,
self.num_groups,
self.group_emb_size],
dtypes.float32),
shape=(self.hparams.num_children, self.num_groups, self.group_emb_size))
self.seq2seq_input_layer = seq2seq_input_layer
def compute_reward(self, run_time):
if self.hparams.reward_function == "id":
reward = run_time
elif self.hparams.reward_function == "sqrt":
reward = math.sqrt(run_time)
elif self.hparams.reward_function == "log":
reward = math.log1p(run_time)
else:
raise NotImplementedError(
"Unrecognized reward function '%s', consider your "
"--reward_function flag value." % self.hparams.reward_function)
return reward
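# Worked example (sketch): with the default reward_function="sqrt", a measured
# run time of 0.25s yields a reward of 0.5; "log" would give
# math.log1p(0.25) ~= 0.223, and "id" passes the run time through unchanged.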
def build_controller(self):
"""RL optimization interface.
Returns:
ops: A dictionary holding handles of the model used for training.
"""
self._global_step = training_util.get_or_create_global_step()
ops = {}
ops["loss"] = 0
failing_signal = self.compute_reward(self.hparams.failing_signal)
ctr = {}
with tf_ops.name_scope("controller_{}".format(self.ctrl_id)):
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
ctr["reward"] = {"value": [], "ph": [], "update": []}
ctr["ready"] = {"value": [], "ph": [], "update": []}
ctr["best_reward"] = {"value": [], "update": []}
for i in range(self.hparams.num_children):
reward_value = variable_scope.get_local_variable(
"reward_{}".format(i),
initializer=0.0,
dtype=dtypes.float32,
trainable=False)
reward_ph = array_ops.placeholder(
dtypes.float32, shape=(), name="reward_ph_{}".format(i))
reward_update = state_ops.assign(
reward_value, reward_ph, use_locking=True)
ctr["reward"]["value"].append(reward_value)
ctr["reward"]["ph"].append(reward_ph)
ctr["reward"]["update"].append(reward_update)
best_reward = variable_scope.get_local_variable(
"best_reward_{}".format(i),
initializer=failing_signal,
dtype=dtypes.float32,
trainable=False)
ctr["best_reward"]["value"].append(best_reward)
ctr["best_reward"]["update"].append(
state_ops.assign(best_reward,
math_ops.minimum(best_reward, reward_update)))
ready_value = variable_scope.get_local_variable(
"ready_{}".format(i),
initializer=True,
dtype=dtypes.bool,
trainable=False)
ready_ph = array_ops.placeholder(
dtypes.bool, shape=(), name="ready_ph_{}".format(i))
ready_update = state_ops.assign(
ready_value, ready_ph, use_locking=True)
ctr["ready"]["value"].append(ready_value)
ctr["ready"]["ph"].append(ready_ph)
ctr["ready"]["update"].append(ready_update)
ctr["grouping_y_preds"], ctr["grouping_log_probs"] = self.get_groupings()
summary.histogram(
"grouping_actions",
array_ops.slice(ctr["grouping_y_preds"]["sample"], [0, 0],
[1, array_ops.shape(self.op_embeddings)[0]]))
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
ctr["baseline"] = variable_scope.get_local_variable(
"baseline",
initializer=failing_signal
if self.hparams.start_with_failing_signal else 0.0,
dtype=dtypes.float32,
trainable=False)
new_baseline = self.hparams.bl_dec * ctr["baseline"] + (
1 - self.hparams.bl_dec) * math_ops.reduce_mean(
ctr["reward"]["value"])
if not self.hparams.always_update_baseline:
baseline_mask = math_ops.less(ctr["reward"]["value"], failing_signal)
selected_reward = array_ops.boolean_mask(ctr["reward"]["value"],
baseline_mask)
selected_baseline = control_flow_ops.cond(
math_ops.reduce_any(baseline_mask),
lambda: math_ops.reduce_mean(selected_reward),
lambda: constant_op.constant(0, dtype=dtypes.float32))
ctr["pos_reward"] = selected_baseline
pos_ = math_ops.less(
constant_op.constant(0, dtype=dtypes.float32), selected_baseline)
selected_baseline = self.hparams.bl_dec * ctr["baseline"] + (
1 - self.hparams.bl_dec) * selected_baseline
selected_baseline = control_flow_ops.cond(
pos_, lambda: selected_baseline, lambda: ctr["baseline"])
new_baseline = control_flow_ops.cond(
math_ops.less(self.global_step,
self.hparams.stop_updating_after_steps),
lambda: new_baseline, lambda: selected_baseline)
ctr["baseline_update"] = state_ops.assign(
ctr["baseline"], new_baseline, use_locking=True)
ctr["y_preds"], ctr["log_probs"] = self.get_placements()
summary.histogram("actions", ctr["y_preds"]["sample"])
mask = math_ops.less(ctr["reward"]["value"], failing_signal)
ctr["loss"] = ctr["reward"]["value"] - ctr["baseline"]
ctr["loss"] *= (
ctr["log_probs"]["sample"] + ctr["grouping_log_probs"]["sample"])
selected_loss = array_ops.boolean_mask(ctr["loss"], mask)
selected_loss = control_flow_ops.cond(
math_ops.reduce_any(mask),
lambda: math_ops.reduce_mean(-selected_loss),
lambda: constant_op.constant(0, dtype=dtypes.float32))
ctr["loss"] = control_flow_ops.cond(
math_ops.less(self.global_step,
self.hparams.stop_updating_after_steps),
lambda: math_ops.reduce_mean(-ctr["loss"]), lambda: selected_loss)
ctr["reward_s"] = math_ops.reduce_mean(ctr["reward"]["value"])
summary.scalar("loss", ctr["loss"])
summary.scalar("avg_reward", ctr["reward_s"])
summary.scalar("best_reward_so_far", best_reward)
summary.scalar(
"advantage",
math_ops.reduce_mean(ctr["reward"]["value"] - ctr["baseline"]))
with variable_scope.variable_scope(
"optimizer", reuse=variable_scope.AUTO_REUSE):
(ctr["train_op"], ctr["lr"], ctr["grad_norm"],
ctr["grad_norms"]) = self._get_train_ops(
ctr["loss"],
tf_ops.get_collection(tf_ops.GraphKeys.TRAINABLE_VARIABLES),
self.global_step,
grad_bound=self.hparams.grad_bound,
lr_init=self.hparams.lr,
lr_dec=self.hparams.lr_dec,
start_decay_step=self.hparams.start_decay_step,
decay_steps=self.hparams.decay_steps,
optimizer_type=self.hparams.optimizer_type)
summary.scalar("gradnorm", ctr["grad_norm"])
summary.scalar("lr", ctr["lr"])
ctr["summary"] = summary.merge_all()
ops["controller"] = ctr
self.ops = ops
return ops
@property
def global_step(self):
return self._global_step
def create_op_embeddings(self, verbose=False):
if verbose:
print("process input graph for op embeddings")
self.num_ops = len(self.important_ops)
# topological sort of important nodes
topo_order = [op.name for op in self.important_ops]
# map each topologically sorted important node's name to its index
name_to_topo_order_index = {}
for idx, x in enumerate(topo_order):
name_to_topo_order_index[x] = idx
self.name_to_topo_order_index = name_to_topo_order_index
# create adj matrix
adj_dict = {}
for idx, op in enumerate(self.important_ops):
for output_op in self.get_node_fanout(op):
output_op_name = output_op.name
if output_op_name in self.important_op_names:
if name_to_topo_order_index[op.name] not in adj_dict:
adj_dict[name_to_topo_order_index[op.name]] = []
adj_dict[name_to_topo_order_index[op.name]].extend(
[name_to_topo_order_index[output_op_name], 1])
if output_op_name not in adj_dict:
adj_dict[name_to_topo_order_index[output_op_name]] = []
adj_dict[name_to_topo_order_index[output_op_name]].extend(
[name_to_topo_order_index[op.name], -1])
# get op_type op_output_shape, and adj info
output_embed_dim = (self.hparams.max_num_outputs *
self.hparams.max_output_size)
# TODO(bsteiner): don't filter based on used ops so that we can generalize
# to models that use other types of ops.
used_ops = set()
for node in self.important_ops:
op_type = str(node.op)
used_ops.add(op_type)
self.type_dict = {}
for op_type in self.cluster.ListAvailableOps():
if op_type in used_ops:
self.type_dict[op_type] = len(self.type_dict)
op_types = np.zeros([self.num_ops], dtype=np.int32)
op_output_shapes = np.full(
[self.num_ops, output_embed_dim], -1.0, dtype=np.float32)
for idx, node in enumerate(self.important_ops):
op_types[idx] = self.type_dict[node.op]
# output shape
op_name = node.name
for i, output_prop in enumerate(self.node_properties[op_name]):
if output_prop.shape.__str__() == "<unknown>":
continue
shape = output_prop.shape
for j, dim in enumerate(shape.dim):
if dim.size >= 0:
if i * self.hparams.max_output_size + j >= output_embed_dim:
break
op_output_shapes[idx,
i * self.hparams.max_output_size + j] = dim.size
# adjacency info, padded or truncated to adj_embed_dim
op_adj = np.full(
[self.num_ops, self.hparams.adj_embed_dim], 0, dtype=np.float32)
for idx in adj_dict:
neighbors = adj_dict[int(idx)]
min_dim = min(self.hparams.adj_embed_dim, len(neighbors))
padding_size = self.hparams.adj_embed_dim - min_dim
neighbors = neighbors[:min_dim] + [0] * padding_size
op_adj[int(idx)] = neighbors
# op_embedding starts here
op_embeddings = np.zeros(
[
self.num_ops,
1 + self.hparams.max_num_outputs * self.hparams.max_output_size +
self.hparams.adj_embed_dim
],
dtype=np.float32)
for idx, op_name in enumerate(topo_order):
op_embeddings[idx] = np.concatenate(
(np.array([op_types[idx]]), op_output_shapes[idx], op_adj[int(idx)]))
self.op_embeddings = constant_op.constant(
op_embeddings, dtype=dtypes.float32)
if verbose:
print("num_ops = {}".format(self.num_ops))
print("num_types = {}".format(len(self.type_dict)))
def get_groupings(self, *args, **kwargs):
num_children = self.hparams.num_children
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
grouping_actions_cache = variable_scope.get_local_variable(
"grouping_actions_cache",
initializer=init_ops.zeros_initializer,
dtype=dtypes.int32,
shape=[num_children, self.num_ops],
trainable=False)
input_layer = self.op_embeddings
input_layer = array_ops.expand_dims(input_layer, 0)
feed_ff_input_layer = array_ops.tile(input_layer, [num_children, 1, 1])
grouping_actions, grouping_log_probs = {}, {}
grouping_actions["sample"], grouping_log_probs[
"sample"] = self.make_grouping_predictions(feed_ff_input_layer)
grouping_actions["sample"] = state_ops.assign(grouping_actions_cache,
grouping_actions["sample"])
self.grouping_actions_cache = grouping_actions_cache
return grouping_actions, grouping_log_probs
def make_grouping_predictions(self, input_layer, reuse=None):
"""model that predicts grouping (grouping_actions).
Args:
input_layer: group_input_layer
reuse: reuse
Returns:
grouping_actions: actions
grouping_log_probs: log probabilities corresponding to actions
"""
with variable_scope.variable_scope(self.hparams.name, reuse=True):
# input_layer: tensor of size [num_children, num_ops, embedding_dim]
w_grouping_ff = variable_scope.get_variable("w_grouping_ff")
w_grouping_softmax = variable_scope.get_variable("w_grouping_softmax")
batch_size = array_ops.shape(input_layer)[0]
embedding_dim = array_ops.shape(input_layer)[2]
reshaped = array_ops.reshape(input_layer,
[batch_size * self.num_ops, embedding_dim])
ff_output = math_ops.matmul(reshaped, w_grouping_ff)
logits = math_ops.matmul(ff_output, w_grouping_softmax)
if self.hparams.logits_std_noise > 0:
num_in_logits = math_ops.cast(
array_ops.size(logits), dtype=dtypes.float32)
avg_norm = math_ops.divide(
linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
logits_noise = random_ops.random_normal(
array_ops.shape(logits),
stddev=self.hparams.logits_std_noise * avg_norm)
logits = control_flow_ops.cond(
self.global_step > self.hparams.stop_noise_step, lambda: logits,
lambda: logits + logits_noise)
logits = array_ops.reshape(logits,
[batch_size * self.num_ops, self.num_groups])
actions = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
actions = math_ops.cast(actions, dtypes.int32)
actions = array_ops.reshape(actions, [batch_size, self.num_ops])
action_label = array_ops.reshape(actions, [-1])
log_probs = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=action_label)
log_probs = array_ops.reshape(log_probs, [batch_size, -1])
log_probs = math_ops.reduce_sum(log_probs, 1)
grouping_actions = actions
grouping_log_probs = log_probs
return grouping_actions, grouping_log_probs
def create_group_embeddings(self, grouping_actions, verbose=False):
"""Approximating the blocks of a TF graph from a graph_def.
Args:
grouping_actions: grouping predictions.
verbose: print stuffs.
Returns:
groups: list of groups.
"""
groups = [
self._create_group_embeddings(grouping_actions, i, verbose) for
i in range(self.hparams.num_children)
]
return np.stack(groups, axis=0)
def _create_group_embeddings(self, grouping_actions, child_id, verbose=False):
"""Approximating the blocks of a TF graph from a graph_def for each child.
Args:
grouping_actions: grouping predictions.
child_id: child_id for the group.
verbose: print stuffs.
Returns:
groups: group embedding for the child_id.
"""
if verbose:
print("Processing input_graph")
# TODO(azalia): Build inter-adjacencies dag matrix.
# record dag_matrix
dag_matrix = np.zeros([self.num_groups, self.num_groups], dtype=np.float32)
for op in self.important_ops:
topo_op_index = self.name_to_topo_order_index[op.name]
group_index = grouping_actions[child_id][topo_op_index]
for output_op in self.get_node_fanout(op):
if output_op.name not in self.important_op_names:
continue
output_group_index = (
grouping_actions[child_id][self.name_to_topo_order_index[
output_op.name]])
dag_matrix[group_index, output_group_index] += 1.0
num_connections = np.sum(dag_matrix)
num_intra_group_connections = dag_matrix.trace()
num_inter_group_connections = num_connections - num_intra_group_connections
if verbose:
print("grouping evaluation metric")
print(("num_connections={} num_intra_group_connections={} "
"num_inter_group_connections={}").format(
num_connections, num_intra_group_connections,
num_inter_group_connections))
self.dag_matrix = dag_matrix
# output_shape
op_output_shapes = np.zeros(
[
len(self.important_ops),
self.hparams.max_num_outputs * self.hparams.max_output_size
],
dtype=np.float32)
for idx, op in enumerate(self.important_ops):
for i, output_properties in enumerate(self.node_properties[op.name]):
if output_properties.shape.__str__() == "<unknown>":
continue
if i > self.hparams.max_num_outputs:
break
shape = output_properties.shape
for j, dim in enumerate(shape.dim):
if dim.size > 0:
k = i * self.hparams.max_output_size + j
if k >= self.hparams.max_num_outputs * self.hparams.max_output_size:
break
op_output_shapes[idx, k] = dim.size
# group_embedding
group_embedding = np.zeros(
[
self.num_groups, len(self.type_dict) +
self.hparams.max_num_outputs * self.hparams.max_output_size
],
dtype=np.float32)
for op_index, op in enumerate(self.important_ops):
group_index = grouping_actions[child_id][
self.name_to_topo_order_index[op.name]]
type_name = str(op.op)
type_index = self.type_dict[type_name]
group_embedding[group_index, type_index] += 1
group_embedding[group_index, :self.hparams.max_num_outputs * self.hparams.
max_output_size] += (
op_output_shapes[op_index])
grouping_adjacencies = np.concatenate(
[dag_matrix, np.transpose(dag_matrix)], axis=1)
group_embedding = np.concatenate(
[grouping_adjacencies, group_embedding], axis=1)
group_normalizer = np.amax(group_embedding, axis=1, keepdims=True)
group_embedding /= (group_normalizer + 1.0)
if verbose:
print("Finished Processing Input Graph")
return group_embedding
def get_placements(self, *args, **kwargs):
num_children = self.hparams.num_children
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
actions_cache = variable_scope.get_local_variable(
"actions_cache",
initializer=init_ops.zeros_initializer,
dtype=dtypes.int32,
shape=[num_children, self.num_groups],
trainable=False)
x = self.seq2seq_input_layer
last_c, last_h, attn_mem = self.encode(x)
actions, log_probs = {}, {}
actions["sample"], log_probs["sample"] = (
self.decode(
x, last_c, last_h, attn_mem, mode="sample"))
actions["target"], log_probs["target"] = (
self.decode(
x,
last_c,
last_h,
attn_mem,
mode="target",
y=actions_cache))
actions["greedy"], log_probs["greedy"] = (
self.decode(
x, last_c, last_h, attn_mem, mode="greedy"))
actions["sample"] = control_flow_ops.cond(
self.global_step < self.hparams.stop_sampling,
lambda: state_ops.assign(actions_cache, actions["sample"]),
lambda: state_ops.assign(actions_cache, actions["target"]))
self.actions_cache = actions_cache
return actions, log_probs
def encode(self, x):
"""Encoder using LSTM.
Args:
x: tensor of size [num_children, num_groups, embedding_size]
Returns:
last_c, last_h: tensors of size [num_children, hidden_size], the final
LSTM states
attn_mem: tensor of size [num_children, num_groups, hidden_size], the
attention memory, i.e. the concatenation of all hidden states, linearly
transformed by the attention matrix attn_w_1.
"""
if self.hparams.bi_lstm:
with variable_scope.variable_scope(self.hparams.name, reuse=True):
w_lstm_forward = variable_scope.get_variable("encoder_lstm_forward")
w_lstm_backward = variable_scope.get_variable("encoder_lstm_backward")
forget_bias = variable_scope.get_variable("encoder_forget_bias")
attn_w_1 = variable_scope.get_variable("attn_w_1")
else:
with variable_scope.variable_scope(self.hparams.name, reuse=True):
w_lstm = variable_scope.get_variable("encoder_lstm")
forget_bias = variable_scope.get_variable("encoder_forget_bias")
attn_w_1 = variable_scope.get_variable("attn_w_1")
embedding_size = array_ops.shape(x)[2]
signals = array_ops.split(x, self.num_groups, axis=1)
for i in range(len(signals)):
signals[i] = array_ops.reshape(
signals[i], [self.hparams.num_children, embedding_size])
if self.hparams.bi_lstm:
def body(i, prev_c_forward, prev_h_forward, prev_c_backward,
prev_h_backward):
"""while loop for LSTM."""
signal_forward = signals[i]
next_c_forward, next_h_forward = lstm(signal_forward, prev_c_forward,
prev_h_forward, w_lstm_forward,
forget_bias)
signal_backward = signals[self.num_groups - 1 - i]
next_c_backward, next_h_backward = lstm(
signal_backward, prev_c_backward, prev_h_backward, w_lstm_backward,
forget_bias)
next_h = array_ops.concat([next_h_forward, next_h_backward], axis=1)
all_h.append(next_h)
return (next_c_forward, next_h_forward, next_c_backward,
next_h_backward)
c_forward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
h_forward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
c_backward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
h_backward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
all_h = []
for i in range(0, self.num_groups):
c_forward, h_forward, c_backward, h_backward = body(
i, c_forward, h_forward, c_backward, h_backward)
last_c = array_ops.concat([c_forward, c_backward], axis=1)
last_h = array_ops.concat([h_forward, h_backward], axis=1)
attn_mem = array_ops.stack(all_h)
else:
def body(i, prev_c, prev_h):
signal = signals[i]
next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
all_h.append(next_h)
return next_c, next_h
c = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size],
dtype=dtypes.float32)
h = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size],
dtype=dtypes.float32)
all_h = []
for i in range(0, self.num_groups):
c, h = body(i, c, h)
last_c = c
last_h = h
attn_mem = array_ops.stack(all_h)
attn_mem = array_ops.transpose(attn_mem, [1, 0, 2])
attn_mem = array_ops.reshape(
attn_mem,
[self.hparams.num_children * self.num_groups, self.hparams.hidden_size])
attn_mem = math_ops.matmul(attn_mem, attn_w_1)
attn_mem = array_ops.reshape(
attn_mem,
[self.hparams.num_children, self.num_groups, self.hparams.hidden_size])
return last_c, last_h, attn_mem
def decode(self,
x,
last_c,
last_h,
attn_mem,
mode="target",
y=None):
"""Decoder using LSTM.
Args:
x: tensor of size [num_children, num_groups, embedding_size].
last_c: tensor of size [num_children, hidden_size], the final LSTM states
computed by self.encoder.
last_h: same as last_c.
attn_mem: tensor of size [num_children, num_groups, hidden_size].
mode: "target" or "sample".
y: tensor of size [num_children, num_groups], the device placements.
Returns:
actions: tensor of size [num_children, num_groups], the placements of
devices
"""
with variable_scope.variable_scope(self.hparams.name, reuse=True):
w_lstm = variable_scope.get_variable("decoder_lstm")
forget_bias = variable_scope.get_variable("decoder_forget_bias")
device_embeddings = variable_scope.get_variable("device_embeddings")
device_softmax = variable_scope.get_variable("device_softmax")
device_go_embedding = variable_scope.get_variable("device_go_embedding")
attn_w_2 = variable_scope.get_variable("attn_w_2")
attn_v = variable_scope.get_variable("attn_v")
actions = tensor_array_ops.TensorArray(
dtypes.int32,
size=self.num_groups,
infer_shape=False,
clear_after_read=False)
# pylint: disable=unused-argument
def condition(i, *args):
return math_ops.less(i, self.num_groups)
# pylint: disable=missing-docstring
def body(i, prev_c, prev_h, actions, log_probs):
# pylint: disable=g-long-lambda
signal = control_flow_ops.cond(
math_ops.equal(i, 0),
lambda: array_ops.tile(device_go_embedding,
[self.hparams.num_children, 1]),
lambda: embedding_ops.embedding_lookup(device_embeddings,
actions.read(i - 1))
)
if self.hparams.keep_prob is not None:
signal = nn_ops.dropout(signal, self.hparams.keep_prob)
next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
query = math_ops.matmul(next_h, attn_w_2)
query = array_ops.reshape(
query, [self.hparams.num_children, 1, self.hparams.hidden_size])
query = math_ops.tanh(query + attn_mem)
query = array_ops.reshape(query, [
self.hparams.num_children * self.num_groups, self.hparams.hidden_size
])
query = math_ops.matmul(query, attn_v)
query = array_ops.reshape(query,
[self.hparams.num_children, self.num_groups])
query = nn_ops.softmax(query)
query = array_ops.reshape(query,
[self.hparams.num_children, self.num_groups, 1])
query = math_ops.reduce_sum(attn_mem * query, axis=1)
query = array_ops.concat([next_h, query], axis=1)
logits = math_ops.matmul(query, device_softmax)
logits /= self.hparams.temperature
if self.hparams.tanh_constant > 0:
logits = math_ops.tanh(logits) * self.hparams.tanh_constant
if self.hparams.logits_std_noise > 0:
num_in_logits = math_ops.cast(
array_ops.size(logits), dtype=dtypes.float32)
avg_norm = math_ops.divide(
linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
logits_noise = random_ops.random_normal(
array_ops.shape(logits),
stddev=self.hparams.logits_std_noise * avg_norm)
logits = control_flow_ops.cond(
self.global_step > self.hparams.stop_noise_step, lambda: logits,
lambda: logits + logits_noise)
if mode == "sample":
next_y = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
elif mode == "greedy":
next_y = math_ops.argmax(logits, 1)
elif mode == "target":
next_y = array_ops.slice(y, [0, i], [-1, 1])
else:
raise NotImplementedError
next_y = math_ops.cast(next_y, dtypes.int32)
next_y = array_ops.reshape(next_y, [self.hparams.num_children])
actions = actions.write(i, next_y)
log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=next_y)
return i + 1, next_c, next_h, actions, log_probs
loop_vars = [
constant_op.constant(0, dtype=dtypes.int32), last_c, last_h, actions,
array_ops.zeros([self.hparams.num_children], dtype=dtypes.float32)
]
loop_outputs = control_flow_ops.while_loop(condition, body, loop_vars)
last_c = loop_outputs[-4]
last_h = loop_outputs[-3]
actions = loop_outputs[-2].stack()
actions = array_ops.transpose(actions, [1, 0])
log_probs = loop_outputs[-1]
return actions, log_probs
def eval_placement(self,
sess,
child_id=0,
verbose=False):
grouping_actions, actions = sess.run([
self.grouping_actions_cache,
self.actions_cache
])
grouping_actions = grouping_actions[child_id]
actions = actions[child_id]
if verbose:
global_step = sess.run(self.global_step)
if global_step % 100 == 0:
log_string = "op group assignments: "
for a in grouping_actions:
log_string += "{} ".format(a)
print(log_string[:-1])
log_string = "group device assignments: "
for a in actions:
log_string += "{} ".format(a)
print(log_string[:-1])
for op in self.important_ops:
topo_order_index = self.name_to_topo_order_index[op.name]
group_index = grouping_actions[topo_order_index]
op.device = self.devices[actions[group_index]].name
try:
_, run_time, _ = self.cluster.MeasureCosts(self.item)
except errors.ResourceExhaustedError:
run_time = self.hparams.failing_signal
return run_time
def update_reward(self,
sess,
run_time,
child_id=0,
verbose=False):
reward = self.compute_reward(run_time)
controller_ops = self.ops["controller"]
_, best_reward = sess.run(
[
controller_ops["reward"]["update"][child_id],
controller_ops["best_reward"]["update"][child_id]
],
feed_dict={
controller_ops["reward"]["ph"][child_id]: reward,
})
if verbose:
print(("run_time={:<.5f} reward={:<.5f} "
"best_reward={:<.5f}").format(run_time, reward, best_reward))
# Reward is a double, best_reward a float: allow for some slack in the
# comparison.
updated = abs(best_reward - reward) < 1e-6
return updated
def generate_grouping(self, sess):
controller_ops = self.ops["controller"]
grouping_actions = sess.run(controller_ops["grouping_y_preds"]["sample"])
return grouping_actions
def generate_placement(self, grouping, sess):
controller_ops = self.ops["controller"]
feed_seq2seq_input_dict = {}
feed_seq2seq_input_dict[self.seq2seq_input_layer] = grouping
sess.run(
controller_ops["y_preds"]["sample"], feed_dict=feed_seq2seq_input_dict)
def process_reward(self, sess):
controller_ops = self.ops["controller"]
run_ops = [
controller_ops["loss"], controller_ops["lr"],
controller_ops["grad_norm"], controller_ops["grad_norms"],
controller_ops["train_op"]
]
sess.run(run_ops)
sess.run(controller_ops["baseline_update"])
def _get_train_ops(self,
loss,
tf_variables,
global_step,
grad_bound=1.25,
lr_init=1e-3,
lr_dec=0.9,
start_decay_step=10000,
decay_steps=100,
optimizer_type="adam"):
"""Loss optimizer.
Args:
loss: scalar tf tensor
tf_variables: list of training variables, typically
tf.compat.v1.trainable_variables()
global_step: global_step
grad_bound: max gradient norm
lr_init: initial learning rate
lr_dec: learning rate decay coefficient
start_decay_step: start decaying learning rate after this many steps
decay_steps: apply decay rate factor at this step intervals
optimizer_type: optimizer type should be either adam or sgd
Returns:
train_op: training op
learning_rate: scalar learning rate tensor
grad_norm: l2 norm of the gradient vector
all_grad_norms: l2 norm of each component
"""
lr_gstep = global_step - start_decay_step
def f1():
return constant_op.constant(lr_init)
def f2():
return learning_rate_decay.exponential_decay(lr_init, lr_gstep,
decay_steps, lr_dec, True)
learning_rate = control_flow_ops.cond(
math_ops.less(global_step, start_decay_step),
f1,
f2,
name="learning_rate")
if optimizer_type == "adam":
opt = adam.AdamOptimizer(learning_rate)
elif optimizer_type == "sgd":
opt = gradient_descent.GradientDescentOptimizer(learning_rate)
grads_and_vars = opt.compute_gradients(loss, tf_variables)
grad_norm = clip_ops.global_norm([g for g, v in grads_and_vars])
all_grad_norms = {}
clipped_grads = []
clipped_rate = math_ops.maximum(grad_norm / grad_bound, 1.0)
for g, v in grads_and_vars:
if g is not None:
if isinstance(g, tf_ops.IndexedSlices):
clipped = g.values / clipped_rate
norm_square = math_ops.reduce_sum(clipped * clipped)
clipped = tf_ops.IndexedSlices(clipped, g.indices)
else:
clipped = g / clipped_rate
norm_square = math_ops.reduce_sum(clipped * clipped)
all_grad_norms[v.name] = math_ops.sqrt(norm_square)
clipped_grads.append((clipped, v))
train_op = opt.apply_gradients(clipped_grads, global_step)
return train_op, learning_rate, grad_norm, all_grad_norms
def lstm(x, prev_c, prev_h, w_lstm, forget_bias):
"""LSTM cell.
Args:
x: tensor of size [num_children, input_size].
prev_c: tensor of size [num_children, hidden_size].
prev_h: same as prev_c.
w_lstm: weight matrix of size [input_size + hidden_size, 4 * hidden_size].
forget_bias: scalar bias added to the forget gate before the sigmoid.
Returns:
next_c: updated cell state, same shape as prev_c.
next_h: updated hidden state, same shape as prev_h.
"""
ifog = math_ops.matmul(array_ops.concat([x, prev_h], axis=1), w_lstm)
i, f, o, g = array_ops.split(ifog, 4, axis=1)
i = math_ops.sigmoid(i)
f = math_ops.sigmoid(f + forget_bias)
o = math_ops.sigmoid(o)
g = math_ops.tanh(g)
next_c = i * g + f * prev_c
next_h = o * math_ops.tanh(next_c)
return next_c, next_h
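# Minimal shape sketch for lstm() (illustrative only; relies on the TF 1.x graph
# ops already imported in this module). The weight matrix must have shape
# [input_size + hidden_size, 4 * hidden_size] for the concat/matmul/split to
# line up; both returned states keep the [num_children, hidden_size] shape.
def _example_lstm_shapes(num_children=4, input_size=8, hidden_size=8):
  x = array_ops.zeros([num_children, input_size])
  prev_c = array_ops.zeros([num_children, hidden_size])
  prev_h = array_ops.zeros([num_children, hidden_size])
  w_lstm = array_ops.zeros([input_size + hidden_size, 4 * hidden_size])
  forget_bias = constant_op.constant(1.0)
  next_c, next_h = lstm(x, prev_c, prev_h, w_lstm, forget_bias)
  return next_c.shape, next_h.shape  # both [num_children, hidden_size]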
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements a Cloud Datastore query splitter.
For internal use only. No backwards compatibility guarantees.
"""
# pytype: skip-file
from apache_beam.io.gcp.datastore.v1new import types
from apache_beam.options.value_provider import ValueProvider
__all__ = ['QuerySplitterError', 'SplitNotPossibleError', 'get_splits']
SCATTER_PROPERTY_NAME = '__scatter__'
KEY_PROPERTY_NAME = '__key__'
# The number of keys to sample for each split.
KEYS_PER_SPLIT = 32
class QuerySplitterError(Exception):
"""Top-level error type."""
class SplitNotPossibleError(QuerySplitterError):
"""Raised when some parameter of the query does not allow splitting."""
def get_splits(client, query, num_splits):
"""Returns a list of sharded queries for the given Cloud Datastore query.
This will create up to the desired number of splits, however it may return
less splits if the desired number of splits is unavailable. This will happen
if the number of split points provided by the underlying Datastore is less
than the desired number, which will occur if the number of results for the
query is too small.
This implementation of the QuerySplitter uses the __scatter__ property to
gather random split points for a query.
Note: This implementation is derived from the java query splitter in
https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/master/java/datastore/src/main/java/com/google/datastore/v1/client/QuerySplitterImpl.java
Args:
client: the datastore client.
query: the query to split.
num_splits: the desired number of splits.
Returns:
A list of split queries, of a max length of `num_splits`
Raises:
QuerySplitterError: if split could not be performed owing to query or split
parameters.
"""
if num_splits <= 1:
raise SplitNotPossibleError('num_splits must be > 1, got: %d' % num_splits)
validate_split(query)
splits = []
client_scatter_keys = _get_scatter_keys(client, query, num_splits)
last_client_key = None
for next_client_key in _get_split_key(client_scatter_keys, num_splits):
splits.append(_create_split(last_client_key, next_client_key, query))
last_client_key = next_client_key
splits.append(_create_split(last_client_key, None, query))
return splits
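# Illustrative usage sketch (the client construction and query fields are
# assumptions, not part of this module):
#
#   from google.cloud import datastore
#   client = datastore.Client(project='my-project')
#   query = types.Query(kind='MyKind', project='my-project')
#   sub_queries = get_splits(client, query, num_splits=8)
#   # Each returned query covers a disjoint __key__ range of the original query;
#   # fewer than 8 queries come back if Datastore has too few scatter keys.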
def validate_split(query):
"""
Verifies that the given query can be properly scattered.
Note that equality and ancestor filters are allowed, however they may result
in inefficient sharding.
Raises:
QuerySplitterError if split could not be performed owing to query
parameters.
"""
if query.order:
raise SplitNotPossibleError('Query cannot have any sort orders.')
if query.limit is not None:
raise SplitNotPossibleError('Query cannot have a limit set.')
for filter in query.filters:
if isinstance(filter[1], ValueProvider):
filter_operator = filter[1].get()
else:
filter_operator = filter[1]
if filter_operator in ['<', '<=', '>', '>=']:
raise SplitNotPossibleError('Query cannot have any inequality filters.')
def _create_scatter_query(query, num_splits):
"""Creates a scatter query from the given user query."""
# There is a split containing entities before and after each scatter entity:
# ||---*------*------*------*------*------*------*---|| * = scatter entity
# If we represent each split as a region before a scatter entity, there is an
# extra region following the last scatter point. Thus, we do not need the
# scatter entity for the last region.
limit = (num_splits - 1) * KEYS_PER_SPLIT
scatter_query = types.Query(
kind=query.kind,
project=query.project,
namespace=query.namespace,
order=[SCATTER_PROPERTY_NAME],
projection=[KEY_PROPERTY_NAME],
limit=limit)
return scatter_query
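# Worked example (sketch): for num_splits=4 the scatter query requests
# (4 - 1) * KEYS_PER_SPLIT = 96 scatter keys, ordered by __scatter__ and
# projected onto __key__ only; the region after the last scatter point needs
# no key of its own.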
class IdOrName(object):
"""Represents an ID or name of a Datastore key,
Implements sort ordering: by ID, then by name, keys with IDs before those
with names.
"""
def __init__(self, id_or_name):
self.id_or_name = id_or_name
if isinstance(id_or_name, str):
self.id = None
self.name = id_or_name
elif isinstance(id_or_name, int):
self.id = id_or_name
self.name = None
else:
raise TypeError('Unexpected type of id_or_name: %s' % id_or_name)
def __lt__(self, other):
if not isinstance(other, IdOrName):
return super(IdOrName, self).__lt__(other)
if self.id is not None:
if other.id is None:
return True
else:
return self.id < other.id
if other.id is not None:
return False
return self.name < other.name
def __eq__(self, other):
if not isinstance(other, IdOrName):
return super(IdOrName, self).__eq__(other)
return self.id == other.id and self.name == other.name
def __hash__(self):
return hash((self.id, self.name))
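# Illustrative sketch of the ordering IdOrName defines: numeric IDs sort before
# string names, and each kind is ordered within itself.
def _example_id_or_name_ordering():
  keys = [IdOrName('b'), IdOrName(2), IdOrName('a'), IdOrName(1)]
  return [k.id_or_name for k in sorted(keys)]  # [1, 2, 'a', 'b']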
def client_key_sort_key(client_key):
"""Key function for sorting lists of ``google.cloud.datastore.key.Key``."""
sort_key = [client_key.project, client_key.namespace or '']
# A key path is made up of (kind, id_or_name) pairs. The last pair might be
# missing an id_or_name.
flat_path = list(client_key.flat_path)
while flat_path:
sort_key.append(flat_path.pop(0)) # kind
if flat_path:
sort_key.append(IdOrName(flat_path.pop(0)))
return sort_key
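# Illustrative sketch of the sort key produced for a client key (a stand-in
# object is used here instead of a real google.cloud.datastore.key.Key):
def _example_client_key_sort_key():
  class _FakeKey(object):
    project = 'my-project'
    namespace = None
    flat_path = ('Parent', 1, 'Child', 'x')
  # -> ['my-project', '', 'Parent', IdOrName(1), 'Child', IdOrName('x')]
  return client_key_sort_key(_FakeKey())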
def _get_scatter_keys(client, query, num_splits):
"""Gets a list of split keys given a desired number of splits.
This list will contain multiple split keys for each split. Only a single split
key will be chosen as the split point, however providing multiple keys allows
for more uniform sharding.
Args:
client: the client to datastore containing the data.
query: the user query.
num_splits: the number of desired splits.
Returns:
A list of scatter keys returned by Datastore.
"""
scatter_point_query = _create_scatter_query(query, num_splits)
client_query = scatter_point_query._to_client_query(client)
client_key_splits = [
client_entity.key for client_entity in client_query.fetch(
client=client, limit=scatter_point_query.limit)
]
client_key_splits.sort(key=client_key_sort_key)
return client_key_splits
def _get_split_key(client_keys, num_splits):
"""Given a list of keys and a number of splits find the keys to split on.
Args:
client_keys: the list of keys.
num_splits: the number of splits.
Returns:
A list of keys to split on.
"""
# If the number of keys is less than the number of splits, we are limited
# in the number of splits we can make.
if not client_keys or (len(client_keys) < (num_splits - 1)):
return client_keys
# Calculate the number of keys per split. This should be KEYS_PER_SPLIT,
# but may be less if there are not KEYS_PER_SPLIT * (numSplits - 1) scatter
# entities.
#
# Consider the following dataset, where - represents an entity and
# * represents an entity that is returned as a scatter entity:
# ||---*-----*----*-----*-----*------*----*----||
# If we want 4 splits in this data, the optimal split would look like:
# ||---*-----*----*-----*-----*------*----*----||
# | | |
# The scatter keys in the last region are not useful to us, so we never
# request them:
# ||---*-----*----*-----*-----*------*---------||
# | | |
# With 6 scatter keys we want to set scatter points at indexes: 1, 3, 5.
#
# We keep this as a float so that any "fractional" keys per split get
# distributed throughout the splits and don't make the last split
# significantly larger than the rest.
num_keys_per_split = max(1.0, float(len(client_keys)) / (num_splits - 1))
split_client_keys = []
# Grab the last sample for each split, otherwise the first split will be too
# small.
for i in range(1, num_splits):
split_index = int(round(i * num_keys_per_split) - 1)
split_client_keys.append(client_keys[split_index])
return split_client_keys
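# Worked example (sketch), matching the diagram above: with 6 scatter keys and
# num_splits=4, num_keys_per_split = 6 / 3 = 2.0, so the chosen indexes are
# round(1 * 2) - 1 = 1, round(2 * 2) - 1 = 3 and round(3 * 2) - 1 = 5.
def _example_split_key_indexes():
  fake_keys = ['a', 'b', 'c', 'd', 'e', 'f']  # stand-ins for client keys
  return _get_split_key(fake_keys, 4)  # ['b', 'd', 'f']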
def _create_split(last_client_key, next_client_key, query):
"""Create a new {@link Query} given the query and range.
Args:
last_client_key: the previous key. If null then assumed to be the beginning.
next_client_key: the next key. If null then assumed to be the end.
query: query to base the split query on.
Returns:
A split query with fetches entities in the range [last_key, next_client_key)
"""
if not (last_client_key or next_client_key):
return query
split_query = query.clone()
# Copy filters and, if necessary, convert the default empty tuple to an empty list.
filters = list(split_query.filters)
if last_client_key:
filters.append((KEY_PROPERTY_NAME, '>=', last_client_key))
if next_client_key:
filters.append((KEY_PROPERTY_NAME, '<', next_client_key))
split_query.filters = filters
return split_query
|
|
import os
import uuid
import httplib
import datetime
import jwe
import jwt
import furl
from flask import request
from flask import redirect
from flask import make_response
from modularodm.exceptions import NoResultsFound
from modularodm import Q
from framework import sentry
from framework.auth import cas
from framework.auth import Auth
from framework.auth import oauth_scopes
from framework.routing import json_renderer
from framework.sentry import log_exception
from framework.exceptions import HTTPError
from framework.transactions.context import TokuTransaction
from framework.transactions.handlers import no_auto_transaction
from framework.auth.decorators import must_be_logged_in, must_be_signed, collect_auth
from website import mails
from website import settings
from website.files.models import FileNode, TrashedFileNode, StoredFileNode
from website.project import decorators
from website.addons.base import exceptions
from website.addons.base import signals as file_signals
from website.addons.base import StorageAddonBase
from website.models import User, Node, NodeLog
from website.project.model import DraftRegistration, MetaSchema
from website.util import rubeus
from website.profile.utils import get_gravatar
from website.project.decorators import must_be_valid_project, must_be_contributor_or_public
from website.project.utils import serialize_node
# import so that the associated listener is instantiated and file-event emails get sent
from website.notifications.events.files import FileEvent # noqa
FILE_GONE_ERROR_MESSAGE = u'''
<style>
.file-download{{display: none;}}
.file-share{{display: none;}}
.file-delete{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
This link to the file "{file_name}" is no longer valid.
</div>'''
WATERBUTLER_JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
@decorators.must_have_permission('write')
@decorators.must_not_be_registration
def disable_addon(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
deleted = node.delete_addon(addon_name, auth)
return {'deleted': deleted}
@must_be_logged_in
def get_addon_user_config(**kwargs):
user = kwargs['auth'].user
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
addon = user.get_addon(addon_name)
if addon is None:
raise HTTPError(httplib.BAD_REQUEST)
return addon.to_json(user)
permission_map = {
'create_folder': 'write',
'revisions': 'read',
'metadata': 'read',
'download': 'read',
'upload': 'write',
'delete': 'write',
'copy': 'write',
'move': 'write',
'copyto': 'write',
'moveto': 'write',
'copyfrom': 'read',
'movefrom': 'write',
}
def check_access(node, auth, action, cas_resp):
"""Verify that user can perform requested action on resource. Raise appropriate
error code if action cannot proceed.
"""
permission = permission_map.get(action, None)
if permission is None:
raise HTTPError(httplib.BAD_REQUEST)
if cas_resp:
if permission == 'read':
if node.is_public:
return True
required_scope = oauth_scopes.CoreScopes.NODE_FILE_READ
else:
required_scope = oauth_scopes.CoreScopes.NODE_FILE_WRITE
if not cas_resp.authenticated \
or required_scope not in oauth_scopes.normalize_scopes(cas_resp.attributes['accessTokenScope']):
raise HTTPError(httplib.FORBIDDEN)
if permission == 'read' and node.can_view(auth):
return True
if permission == 'write' and node.can_edit(auth):
return True
# Users attempting to register projects with components might not have
# `write` permissions for all components. This will result in a 403 for
# all `copyto` actions as well as `copyfrom` actions if the component
# in question is not public. To get around this, we have to recursively
# check the node's parent node to determine if they have `write`
# permissions up the stack.
# TODO(hrybacki): is there a way to tell if this is for a registration?
# All nodes being registered that receive the `copyto` action will have
# `node.is_registration` == True. However, we have no way of telling if
# `copyfrom` actions are originating from a node being registered.
# TODO: This will raise UNAUTHORIZED for registrations that have not been archived yet
if action == 'copyfrom' or (action == 'copyto' and node.is_registration):
parent = node.parent_node
while parent:
if parent.can_edit(auth):
return True
parent = parent.parent_node
# Users with the PREREG_ADMIN_TAG should be allowed to download files
# from prereg challenge draft registrations.
try:
prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', 2)
)
allowed_nodes = [node] + node.parents
prereg_draft_registration = DraftRegistration.find(
Q('branched_from', 'in', [n._id for n in allowed_nodes]) &
Q('registration_schema', 'eq', prereg_schema)
)
if action == 'download' and \
auth.user is not None and \
prereg_draft_registration.count() > 0 and \
settings.PREREG_ADMIN_TAG in auth.user.system_tags:
return True
except NoResultsFound:
pass
raise HTTPError(httplib.FORBIDDEN if auth.user else httplib.UNAUTHORIZED)
def make_auth(user):
if user is not None:
return {
'id': user._id,
'email': '{}@osf.io'.format(user._id),
'name': user.fullname,
}
return {}
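# Illustrative sketch: make_auth() builds the minimal user dict sent to
# waterbutler; anonymous requests produce an empty dict.
#
#   make_auth(None) == {}
#   make_auth(user) == {'id': ..., 'email': '<id>@osf.io', 'name': ...}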
@collect_auth
def get_auth(auth, **kwargs):
cas_resp = None
if not auth.user:
# Central Authentication Server OAuth Bearer Token
authorization = request.headers.get('Authorization')
if authorization and authorization.startswith('Bearer '):
client = cas.get_client()
try:
access_token = cas.parse_auth_header(authorization)
cas_resp = client.profile(access_token)
except cas.CasError as err:
sentry.log_exception()
# NOTE: We assume that the request is an AJAX request
return json_renderer(err)
if cas_resp.authenticated:
auth.user = User.load(cas_resp.user)
try:
data = jwt.decode(
jwe.decrypt(request.args.get('payload', '').encode('utf-8'), WATERBUTLER_JWE_KEY),
settings.WATERBUTLER_JWT_SECRET,
options={'require_exp': True},
algorithm=settings.WATERBUTLER_JWT_ALGORITHM
)['data']
except (jwt.InvalidTokenError, KeyError):
raise HTTPError(httplib.FORBIDDEN)
if not auth.user:
auth.user = User.from_cookie(data.get('cookie', ''))
try:
action = data['action']
node_id = data['nid']
provider_name = data['provider']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
node = Node.load(node_id)
if not node:
raise HTTPError(httplib.NOT_FOUND)
check_access(node, auth, action, cas_resp)
provider_settings = node.get_addon(provider_name)
if not provider_settings:
raise HTTPError(httplib.BAD_REQUEST)
try:
credentials = provider_settings.serialize_waterbutler_credentials()
waterbutler_settings = provider_settings.serialize_waterbutler_settings()
except exceptions.AddonError:
log_exception()
raise HTTPError(httplib.BAD_REQUEST)
return {'payload': jwe.encrypt(jwt.encode({
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
'data': {
'auth': make_auth(auth.user), # A waterbutler auth dict not an Auth object
'credentials': credentials,
'settings': waterbutler_settings,
'callback_url': node.api_url_for(
('create_waterbutler_log' if not node.is_registration else 'registration_callbacks'),
_absolute=True,
),
}
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), WATERBUTLER_JWE_KEY)}
LOG_ACTION_MAP = {
'move': NodeLog.FILE_MOVED,
'copy': NodeLog.FILE_COPIED,
'rename': NodeLog.FILE_RENAMED,
'create': NodeLog.FILE_ADDED,
'update': NodeLog.FILE_UPDATED,
'delete': NodeLog.FILE_REMOVED,
'create_folder': NodeLog.FOLDER_CREATED,
}
@must_be_signed
@no_auto_transaction
@must_be_valid_project
def create_waterbutler_log(payload, **kwargs):
with TokuTransaction():
try:
auth = payload['auth']
action = LOG_ACTION_MAP[payload['action']]
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
user = User.load(auth['id'])
if user is None:
raise HTTPError(httplib.BAD_REQUEST)
auth = Auth(user=user)
node = kwargs['node'] or kwargs['project']
if action in (NodeLog.FILE_MOVED, NodeLog.FILE_COPIED):
for bundle in ('source', 'destination'):
for key in ('provider', 'materialized', 'name', 'nid'):
if key not in payload[bundle]:
raise HTTPError(httplib.BAD_REQUEST)
dest = payload['destination']
src = payload['source']
if src is not None and dest is not None:
dest_path = dest['materialized']
src_path = src['materialized']
if dest_path.endswith('/') and src_path.endswith('/'):
dest_path = os.path.dirname(dest_path)
src_path = os.path.dirname(src_path)
if (
os.path.split(dest_path)[0] == os.path.split(src_path)[0] and
dest['provider'] == src['provider'] and
dest['nid'] == src['nid'] and
dest['name'] != src['name']
):
action = LOG_ACTION_MAP['rename']
destination_node = node # For clarity
source_node = Node.load(payload['source']['nid'])
source = source_node.get_addon(payload['source']['provider'])
destination = node.get_addon(payload['destination']['provider'])
payload['source'].update({
'materialized': payload['source']['materialized'].lstrip('/'),
'addon': source.config.full_name,
'url': source_node.web_url_for(
'addon_view_or_download_file',
path=payload['source']['path'].lstrip('/'),
provider=payload['source']['provider']
),
'node': {
'_id': source_node._id,
'url': source_node.url,
'title': source_node.title,
}
})
payload['destination'].update({
'materialized': payload['destination']['materialized'].lstrip('/'),
'addon': destination.config.full_name,
'url': destination_node.web_url_for(
'addon_view_or_download_file',
path=payload['destination']['path'].lstrip('/'),
provider=payload['destination']['provider']
),
'node': {
'_id': destination_node._id,
'url': destination_node.url,
'title': destination_node.title,
}
})
payload.update({
'node': destination_node._id,
'project': destination_node.parent_id,
})
if not payload.get('errors'):
destination_node.add_log(
action=action,
auth=auth,
params=payload
)
if payload.get('email') is True or payload.get('errors'):
mails.send_mail(
user.username,
mails.FILE_OPERATION_FAILED if payload.get('errors')
else mails.FILE_OPERATION_SUCCESS,
action=payload['action'],
source_node=source_node,
destination_node=destination_node,
source_path=payload['source']['materialized'],
destination_path=payload['destination']['materialized'],
source_addon=payload['source']['addon'],
destination_addon=payload['destination']['addon'],
)
if payload.get('error'):
# Action failed but our function succeeded
# Bail out to avoid file_signals
return {'status': 'success'}
else:
try:
metadata = payload['metadata']
node_addon = node.get_addon(payload['provider'])
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if node_addon is None:
raise HTTPError(httplib.BAD_REQUEST)
metadata['path'] = metadata['path'].lstrip('/')
node_addon.create_waterbutler_log(auth, action, metadata)
with TokuTransaction():
file_signals.file_updated.send(node=node, user=user, event_type=action, payload=payload)
return {'status': 'success'}
@file_signals.file_updated.connect
def addon_delete_file_node(self, node, user, event_type, payload):
""" Get addon StoredFileNode(s), move it into the TrashedFileNode collection
and remove it from StoredFileNode.
Required so that the guids of deleted addon files are not re-pointed when an
addon file or folder is moved or renamed.
"""
if event_type == 'file_removed' and payload.get('provider', None) != 'osfstorage':
provider = payload['provider']
path = payload['metadata']['path']
materialized_path = payload['metadata']['materialized']
if path.endswith('/'):
folder_children = FileNode.resolve_class(provider, FileNode.ANY).find(
Q('provider', 'eq', provider) &
Q('node', 'eq', node) &
Q('materialized_path', 'startswith', materialized_path)
)
for item in folder_children:
if item.kind == 'file' and not TrashedFileNode.load(item._id):
item.delete(user=user)
elif item.kind == 'folder':
StoredFileNode.remove_one(item.stored_object)
else:
try:
file_node = FileNode.resolve_class(provider, FileNode.FILE).find_one(
Q('node', 'eq', node) &
Q('materialized_path', 'eq', materialized_path)
)
except NoResultsFound:
file_node = None
if file_node and not TrashedFileNode.load(file_node._id):
file_node.delete(user=user)
@must_be_valid_project
def addon_view_or_download_file_legacy(**kwargs):
query_params = request.args.to_dict()
node = kwargs.get('node') or kwargs['project']
action = query_params.pop('action', 'view')
provider = kwargs.get('provider', 'osfstorage')
if kwargs.get('path'):
path = kwargs['path']
elif kwargs.get('fid'):
path = kwargs['fid']
if 'download' in request.path or request.path.startswith('/api/v1/'):
action = 'download'
if kwargs.get('vid'):
query_params['version'] = kwargs['vid']
# If provider is OSFstorage, check existence of requested file in the filetree
# This prevents invalid GUIDs from being created
if provider == 'osfstorage':
node_settings = node.get_addon('osfstorage')
try:
path = node_settings.get_root().find_child_by_name(path)._id
except NoResultsFound:
raise HTTPError(
404, data=dict(
message_short='File not found',
message_long='You requested a file that does not exist.'
)
)
return redirect(
node.web_url_for(
'addon_view_or_download_file',
path=path,
provider=provider,
action=action,
**query_params
),
code=httplib.MOVED_PERMANENTLY
)
@must_be_valid_project
@must_be_contributor_or_public
def addon_deleted_file(auth, node, **kwargs):
"""Shows a nice error message to users when they try to view
a deleted file
"""
# Allow file_node to be passed in so other views can delegate to this one
trashed = kwargs.get('file_node') or TrashedFileNode.load(kwargs.get('trashed_id'))
if not trashed:
raise HTTPError(httplib.NOT_FOUND, {
'message_short': 'Not Found',
'message_long': 'This file does not exist'
})
ret = serialize_node(node, auth, primary=True)
ret.update(rubeus.collect_addon_assets(node))
ret.update({
'urls': {
'render': None,
'sharejs': None,
'mfr': settings.MFR_SERVER_URL,
'gravatar': get_gravatar(auth.user, 25),
'files': node.web_url_for('collect_file_trees'),
},
'extra': {},
'size': 9966699,  # Prevent file from being edited, just in case
'sharejs_uuid': None,
'file_name': trashed.name,
'file_path': trashed.path,
'provider': trashed.provider,
'materialized_path': trashed.materialized_path,
'error': FILE_GONE_ERROR_MESSAGE.format(file_name=trashed.name),
'private': getattr(node.get_addon(trashed.provider), 'is_private', False),
})
return ret, httplib.GONE
@must_be_valid_project
@must_be_contributor_or_public
def addon_view_or_download_file(auth, path, provider, **kwargs):
extras = request.args.to_dict()
extras.pop('_', None) # Clean up our url params a bit
action = extras.get('action', 'view')
node = kwargs.get('node') or kwargs['project']
node_addon = node.get_addon(provider)
if not path:
raise HTTPError(httplib.BAD_REQUEST)
if not isinstance(node_addon, StorageAddonBase):
raise HTTPError(httplib.BAD_REQUEST, {
'message_short': 'Bad Request',
'message_long': 'The add-on containing this file is no longer connected to the {}.'.format(node.project_or_component)
})
if not node_addon.has_auth:
raise HTTPError(httplib.UNAUTHORIZED, {
'message_short': 'Unauthorized',
'message_long': 'The add-on containing this file is no longer authorized.'
})
if not node_addon.complete:
raise HTTPError(httplib.BAD_REQUEST, {
'message_short': 'Bad Request',
'message_long': 'The add-on containing this file is no longer configured.'
})
file_node = FileNode.resolve_class(provider, FileNode.FILE).get_or_create(node, path)
# Note: Cookie is provided for authentication to waterbutler;
# it is overridden to force authentication as the current user.
# The auth header is also passed to support basic auth.
version = file_node.touch(
request.headers.get('Authorization'),
**dict(
extras,
cookie=request.cookies.get(settings.COOKIE_NAME)
)
)
if version is None:
if file_node.get_guid():
            # If this file has been successfully viewed before but no longer exists
# Move file to trashed file node
if not TrashedFileNode.load(file_node._id):
file_node.delete()
# Show a nice error message
return addon_deleted_file(file_node=file_node, **kwargs)
raise HTTPError(httplib.NOT_FOUND, {
'message_short': 'Not Found',
'message_long': 'This file does not exist'
})
# TODO clean up these urls and unify what is used as a version identifier
if request.method == 'HEAD':
return make_response(('', 200, {
'Location': file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier))
}))
if action == 'download':
return redirect(file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier)))
if len(request.path.strip('/').split('/')) > 1:
guid = file_node.get_guid(create=True)
return redirect(furl.furl('/{}/'.format(guid._id)).set(args=extras).url)
return addon_view_file(auth, node, file_node, version)
def addon_view_file(auth, node, file_node, version):
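    """Build the serialized context used to render a single file page, including
    the MFR render URL and sharejs metadata.
    """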
# TODO: resolve circular import issue
from website.addons.wiki import settings as wiki_settings
if isinstance(version, tuple):
version, error = version
error = error.replace('\n', '').strip()
else:
error = None
ret = serialize_node(node, auth, primary=True)
if file_node._id not in node.file_guid_to_share_uuids:
node.file_guid_to_share_uuids[file_node._id] = uuid.uuid4()
node.save()
if ret['user']['can_edit']:
sharejs_uuid = str(node.file_guid_to_share_uuids[file_node._id])
else:
sharejs_uuid = None
download_url = furl.furl(request.url.encode('utf-8')).set(args=dict(request.args, **{
'direct': None,
'mode': 'render',
'action': 'download',
}))
render_url = furl.furl(settings.MFR_SERVER_URL).set(
path=['render'],
args={'url': download_url.url}
)
ret.update({
'urls': {
'render': render_url.url,
'mfr': settings.MFR_SERVER_URL,
'sharejs': wiki_settings.SHAREJS_URL,
'gravatar': get_gravatar(auth.user, 25),
'files': node.web_url_for('collect_file_trees'),
},
'error': error,
'file_name': file_node.name,
'file_name_title': os.path.splitext(file_node.name)[0],
'file_name_ext': os.path.splitext(file_node.name)[1],
'file_path': file_node.path,
'sharejs_uuid': sharejs_uuid,
'provider': file_node.provider,
'materialized_path': file_node.materialized_path,
'extra': version.metadata.get('extra', {}),
'size': version.size if version.size is not None else 9966699,
'private': getattr(node.get_addon(file_node.provider), 'is_private', False),
'file_tags': [tag._id for tag in file_node.tags],
'file_guid': file_node.get_guid()._id,
'file_id': file_node._id,
'allow_comments': file_node.provider in settings.ADDONS_COMMENTABLE
})
ret.update(rubeus.collect_addon_assets(node))
return ret
|
|
import gobject
import gobject.constants
import weakref
from inspect import getmembers, ismethod
from weak import weak_connect
from util import append_attr
SSIGNAL = gobject.SIGNAL_RUN_LAST | gobject.SIGNAL_NO_RECURSE | gobject.SIGNAL_ACTION
SACTION = gobject.SIGNAL_RUN_LAST | gobject.SIGNAL_ACTION
def attach_signal_connect_info(attr, obj, func, after, idle):
"""
Adds signal connection info to function
Used by signal and trigger decorators
"""
connect_params = dict(after=after, idle=idle)
if func:
        if not callable(func):
            raise Exception('Signal decorator accepts a callable or connect params')
append_attr(func, attr, (obj, connect_params))
return func
else:
def inner(func):
append_attr(func, attr, (obj, connect_params))
return func
return inner
class Signal(object):
"""
Unbounded signal
    Class holds signal parameters which are used to construct the correct GObject later.
Instantiating signals::
Signal() # Signal without arguments
Signal(object, int) # Signal with two arguments
Signal(object, return_type=int) # Signal with return type
Signal(type=gobject.SIGNAL_RUN_FIRST) # default signal type is gobject.SIGNAL_RUN_LAST | gobject.SIGNAL_NO_RECURSE | gobject.SIGNAL_ACTION
Unbounded signal instances can be used to mark callbacks for automatic signal connecting::
signal = Signal()
class Handler(object):
@signal
def callback(...): pass # Usual (in gobject terms) signal connection
@signal(idle=True)
def callback(...): pass # Connects signal with idle wrapper
@signal(after=True)
def callback(...): pass # sender.connect_after(callback) analog
@signal(idle=9999)
def callback(...): pass # idle wrapper will start callback with specified priority
"""
def __init__(self, *signal_args, **kwargs):
allowed_named_arguments = set(('type', 'return_type'))
if not all(r in allowed_named_arguments for r in kwargs.keys()):
raise Exception('Signal constructor takes only `type` and `return_type` named arguments')
self.signal_type = kwargs.get('type', SSIGNAL)
self.return_type = kwargs.get('return_type', None)
self.arg_types = tuple(signal_args)
self.name = None
def __call__(self, func=None, after=False, idle=False):
return attach_signal_connect_info('signals_to_connect', self, func, after, idle)
def emit(self):
"""IDE hint"""
raise Exception('You cannot emit unbounded signals')
def stop_emission(self):
"""IDE hint"""
raise Exception('You cannot stop emission of unbounded signals')
class SignalManager(object):
"""
Wrapper for inner GObject with signals
Example::
class Manager(SignalManager):
show = Signal()
hide = Signal()
    ``Manager.show`` and ``Manager.hide`` are unbounded signals and can be used as
    decorators for callbacks. Whereas ``instance.show`` and ``instance.hide`` are bounded and
    can be used to emit signals::
class Plugin(object):
def __init__(self):
self.signals = Manager()
self.signals.connect_signals()
self.signals.hide.emit()
@Manager.show
def show(self, sender):
pass
Inner GObject with necessary __gsignals__ is constructed during instance initialization
"""
registered_classes = {}
def __new__(cls, *args, **kwargs):
try:
newcls = SignalManager.registered_classes[cls]
obj = newcls.__new__(newcls, *args, **kwargs)
gobject.GObject.__init__(obj)
newcls.__init__(obj, *args, **kwargs)
return obj
except KeyError:
pass
def make_signal_prop(signal):
def inner(self):
return BoundedSignal(self, signal)
return property(inner)
newdict = dict(cls.__dict__)
signals = {}
for sname, signal in cls.__dict__.iteritems():
if isinstance(signal, Signal):
signal.name = sname.replace('_', '-')
signals[signal.name] = (signal.signal_type,
signal.return_type, signal.arg_types)
newdict[sname] = make_signal_prop(signal)
if not signals:
return super(SignalManager, cls).__new__(cls, *args, **kwargs)
newdict['__gsignals__'] = signals
newdict['weak_connect'] = SignalManager.weak_connect
newdict['connect_signals'] = SignalManager.connect_signals
for k, v in newdict.iteritems():
if hasattr(v, 'im_func'):
newdict[k] = v.im_func
newcls = type(cls.__name__, (gobject.GObject,), newdict)
gobject.type_register(newcls)
SignalManager.registered_classes[cls] = newcls
obj = newcls.__new__(newcls, *args, **kwargs)
gobject.GObject.__init__(obj)
newcls.__init__(obj, *args, **kwargs)
return obj
def connect_signals(self, obj):
"""
Connects marked object methods
"""
for attr, value in getmembers(obj.__class__, ismethod):
for signal, connect_params in getattr(value, 'signals_to_connect', ()):
id = self.weak_connect(signal, obj, attr, **connect_params)
append_handler_to_object(obj, attr, id, self, signal.name)
def weak_connect(self, signal, obj, attr, after, idle):
"""
Connects unbounded signal
@param signal: Unbounded signal
"""
return weak_connect(self, signal.name, obj, attr, after=after, idle=idle)
class BoundedSignal(object):
"""
    This class knows about its GObject wrapper and unbounded signal name.
    This allows it to emit signals. A bounded signal is weakly connected to its manager, so
    you can safely use it in any context.
"""
def __init__(self, manager, signal):
self.manager = weakref.ref(manager)
self.signal = signal
def connect(self, obj, attr, after=False, idle=False):
manager = self.manager()
if manager:
manager.weak_connect(self.signal, obj, attr, after=after, idle=idle)
def emit(self, *args):
manager = self.manager()
if manager:
return manager.emit(self.signal.name, *args)
def stop_emission(self):
manager = self.manager()
if manager:
return manager.stop_emission(self.signal.name)
def connect_external(sender_name, signal_name, after=False, idle=False):
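    """Decorator that marks a method for connection to ``signal_name`` of an
    external sender; the sender is looked up by ``sender_name`` in
    connect_external_signals().
    """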
def inner(func):
return attach_signal_connect_info('external_signals_to_connect',
(sender_name, signal_name), func, after, idle)
return inner
def connect_external_signals(obj, **kwargs):
for attr, value in getmembers(obj.__class__, ismethod):
for (sender_name, signal_name), connect_params in getattr(value, 'external_signals_to_connect', ()):
sender = kwargs[sender_name]
id = weak_connect(sender, signal_name, obj, attr, **connect_params)
append_handler_to_object(obj, attr, id, sender, signal_name, sender_name)
def append_handler_to_object(obj, attr, handler_id, sender, signal_name, sender_name=None):
name = attr + '_handler'
if not hasattr(obj, name):
setattr(obj, name, HandlerHolder())
getattr(obj, name).add(handler_id, sender, signal_name, sender_name)
def connect_all(obj, *signal_managers, **external_senders):
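    """Connect all marked methods of ``obj`` to the given signal managers and,
    if provided, to the external senders passed as keyword arguments.
    """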
[s.connect_signals(obj) for s in signal_managers]
if external_senders:
connect_external_signals(obj, **external_senders)
class Handler(object):
def __init__(self, handler_id, sender, signal_name, sender_name):
self.id = handler_id
self.sender = weakref.ref(sender)
self.signal_name = signal_name
self.sender_name = sender_name
def is_match(self, sender, sender_name, signal_name):
result = True
        result = result and (sender is None or self.sender() is sender)
        result = result and (sender_name is None or self.sender_name == sender_name)
        result = result and (signal_name is None or self.signal_name == signal_name)
return result
def block(self):
sender = self.sender()
if sender:
sender.handler_block(self.id)
def unblock(self):
sender = self.sender()
if sender:
sender.handler_unblock(self.id)
class HandlerHolder(object):
def __init__(self):
self.handlers = []
def add(self, id, sender, signal_name, sender_name=None):
self.handlers.append(Handler(id, sender, signal_name, sender_name))
def block(self):
try:
(handler,) = self.handlers
handler.block()
except ValueError:
raise Exception('There are several signals connected to callback')
def unblock(self):
try:
(handler,) = self.handlers
handler.unblock()
except ValueError:
raise Exception('There are several signals connected to callback')
@property
def id(self):
try:
(handler,) = self.handlers
return handler.id
except ValueError:
raise Exception('There are several signals connected to callback')
def __call__(self, sender=None, sender_name=None, signal_name=None):
handler = None
for h in self.handlers:
if h.is_match(sender=sender, sender_name=sender_name, signal_name=signal_name):
if handler:
raise Exception('Match returns several handlers')
else:
handler = h
return handler
|
|
import getpass
import gzip
import os
import shutil
import socket
import time
from pygresql import pg
from gppylib import gplog
from gppylib.commands.base import WorkerPool, Command, ExecutionError
from gppylib.commands.gp import Psql
from gppylib.commands.unix import Scp
from gppylib.utils import shellEscape
from gppylib.db import dbconn
from gppylib.db.dbconn import execSQL, execSQLForSingleton
from gppylib.gparray import GpArray
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.operations import Operation
from gppylib.operations.backup_utils import *
from gppylib.operations.unix import CheckFile, CheckRemoteDir, MakeRemoteDir, CheckRemotePath
from re import compile, search, sub
"""
TODO: partial restore. In 4.x, dump will only occur on primaries.
So, after a dump, dump files must be pushed to mirrors. (This is a task for gpcrondump.)
"""
""" TODO: centralize logging """
logger = gplog.get_default_logger()
WARN_MARK = '<<<<<'
# TODO: use CLI-agnostic custom exceptions instead of ExceptionNoStackTraceNeeded
def update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size):
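    """Update master AO statistics for a single table via
    gp_update_ao_master_stats(), committing once every batch_size calls."""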
qry = "SELECT * FROM gp_update_ao_master_stats('%s.%s')" % (pg.escape_string(escapeDoubleQuoteInSQLString(ao_schema)),
pg.escape_string(escapeDoubleQuoteInSQLString(ao_table)))
rows = execSQLForSingleton(conn, qry)
if counter % batch_size == 0:
conn.commit()
def generate_restored_tables(results, restored_tables, restored_schema, restore_all):
restored_ao_tables = set()
for (tbl, sch) in results:
if restore_all:
restored_ao_tables.add((sch, tbl))
elif sch in restored_schema:
restored_ao_tables.add((sch, tbl))
else:
tblname = '%s.%s' % (sch, tbl)
if tblname in restored_tables:
restored_ao_tables.add((sch, tbl))
return restored_ao_tables
def update_ao_statistics(context, restored_tables, restored_schema=[], restore_all=False):
# Restored schema is different from restored tables as restored schema updates all tables within that schema.
qry = """SELECT c.relname,n.nspname
FROM pg_class c, pg_namespace n
WHERE c.relnamespace=n.oid
AND (c.relstorage='a' OR c.relstorage='c')"""
counter = 1
try:
results = execute_sql(qry, context.master_port, context.restore_db)
restored_ao_tables = generate_restored_tables(results, restored_tables, restored_schema, restore_all)
if len(restored_ao_tables) == 0:
logger.info("No AO/CO tables restored, skipping statistics update...")
return
with dbconn.connect(dbconn.DbURL(port=context.master_port, dbname=context.restore_db)) as conn:
for ao_schema, ao_table in sorted(restored_ao_tables):
update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size=1000)
counter = counter + 1
conn.commit()
except Exception as e:
logger.info("Error updating ao statistics after restore")
raise e
def get_restore_tables_from_table_file(table_file):
if not os.path.isfile(table_file):
raise Exception('Table file does not exist "%s"' % table_file)
return get_lines_from_file(table_file)
def get_incremental_restore_timestamps(context, full_timestamp):
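    """Return context.timestamp and all older incremental timestamps (newest
    first) from the increments file of the given full backup, or an empty
    list if context.timestamp is not listed there."""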
inc_file = context.generate_filename("increments", timestamp=full_timestamp)
timestamps = get_lines_from_file(inc_file)
sorted_timestamps = sorted(timestamps, key=lambda x: int(x), reverse=True)
incremental_restore_timestamps = []
try:
incremental_restore_timestamps = sorted_timestamps[sorted_timestamps.index(context.timestamp):]
except ValueError:
pass
return incremental_restore_timestamps
def get_partition_list(context):
partition_list_file = context.generate_filename("partition_list")
partition_list = get_lines_from_file(partition_list_file)
partition_list = [split_fqn(p) for p in partition_list]
return partition_list
def get_dirty_table_file_contents(context, timestamp):
dirty_list_file = context.generate_filename("dirty_table", timestamp=timestamp)
return get_lines_from_file(dirty_list_file)
def create_plan_file_contents(context, table_set_from_metadata_file, incremental_restore_timestamps, full_timestamp):
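    """Assign each table to the newest incremental timestamp in which it is
    dirty; tables not dirty in any increment are restored from the full
    backup timestamp. Returns a dict mapping timestamp to a list of tables."""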
restore_set = {}
for ts in incremental_restore_timestamps:
restore_set[ts] = []
if context.netbackup_service_host:
restore_file_with_nbu(context, "dirty_table", timestamp=ts)
dirty_tables = get_dirty_table_file_contents(context, ts)
for dt in dirty_tables:
if dt in table_set_from_metadata_file:
table_set_from_metadata_file.remove(dt)
restore_set[ts].append(dt)
restore_set[full_timestamp] = []
if len(table_set_from_metadata_file) != 0:
for table in table_set_from_metadata_file:
restore_set[full_timestamp].append(table)
return restore_set
def write_to_plan_file(plan_file_contents, plan_file):
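    """Write the restore plan as one 'timestamp:table,table,...' line per
    timestamp, newest timestamp first, and return the written lines."""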
if plan_file is None or not plan_file:
raise Exception('Invalid plan file %s' % str(plan_file))
sorted_plan_file_contents = sorted(plan_file_contents, key=lambda x: int(x), reverse=True)
lines_to_write = []
for ts in sorted_plan_file_contents:
tables_str = ','.join(plan_file_contents[ts])
lines_to_write.append(ts + ':' + tables_str)
write_lines_to_file(plan_file, lines_to_write)
return lines_to_write
def create_restore_plan(context):
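    """Build the restore plan from the partition list and increments file,
    write it to the plan file, and return the plan file path."""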
dump_tables = get_partition_list(context)
table_set_from_metadata_file = [schema + '.' + table for schema, table in dump_tables]
full_timestamp = get_full_timestamp_for_incremental(context)
incremental_restore_timestamps = get_incremental_restore_timestamps(context, full_timestamp)
plan_file_contents = create_plan_file_contents(context, table_set_from_metadata_file, incremental_restore_timestamps, full_timestamp)
plan_file = context.generate_filename("plan")
write_to_plan_file(plan_file_contents, plan_file)
return plan_file
def is_incremental_restore(context):
filename = context.generate_filename("report")
if not os.path.isfile(filename):
logger.warn('Report file %s does not exist for restore timestamp %s' % (filename, context.timestamp))
return False
report_file_contents = get_lines_from_file(filename)
if check_backup_type(report_file_contents, 'Incremental'):
return True
return False
def is_full_restore(context):
filename = context.generate_filename("report")
if not os.path.isfile(filename):
raise Exception('Report file %s does not exist for restore timestamp %s' % (filename, context.timestamp))
report_file_contents = get_lines_from_file(filename)
if check_backup_type(report_file_contents, 'Full'):
return True
return False
def get_plan_file_contents(context):
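    """Parse the plan file into (timestamp, comma-separated table list) tuples."""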
plan_file_items = []
plan_file = context.generate_filename("plan")
if not os.path.isfile(plan_file):
raise Exception('Plan file %s does not exist' % plan_file)
plan_file_lines = get_lines_from_file(plan_file)
if len(plan_file_lines) <= 0:
raise Exception('Plan file %s has no contents' % plan_file)
for line in plan_file_lines:
if ':' not in line:
raise Exception('Invalid plan file format')
# timestamp is of length 14, don't split by ':' in case table name contains ':'
# don't strip white space on table_list, schema and table name may contain white space
ts, table_list = line[:14], line[15:]
plan_file_items.append((ts.strip(), table_list))
return plan_file_items
def get_restore_table_list(table_list, restore_tables):
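    """Filter the plan-file table list by the requested restore tables (keep
    everything if none were requested) and write the result to a temporary
    file; returns None when no table matches."""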
restore_table_set = set()
restore_list = []
if restore_tables is None or len(restore_tables) == 0:
restore_list = table_list
else:
for restore_table in restore_tables:
schema, table = split_fqn(restore_table)
restore_table_set.add((schema, table))
for tbl in table_list:
schema, table = split_fqn(tbl)
if (schema, table) in restore_table_set:
restore_list.append(tbl)
if restore_list == []:
return None
return create_temp_file_with_tables(restore_list)
def validate_restore_tables_list(plan_file_contents, restore_tables, restore_schemas=None):
"""
Check if the tables in plan file match any of the restore tables.
    For a schema level restore, check if the table's schema in the plan file matches
    any member of the schema list.
"""
if restore_tables is None:
return
table_set = set()
comp_set = set()
for ts, table in plan_file_contents:
tables = table.split(',')
for table in tables:
table_set.add(table)
invalid_tables = []
for table in restore_tables:
schema_name, table_name = split_fqn(table)
if restore_schemas and schema_name in restore_schemas:
continue
else:
comp_set.add(table)
if not comp_set.issubset(table_set):
invalid_tables.append(table)
comp_set.remove(table)
if invalid_tables != []:
raise Exception('Invalid tables for -T option: The following tables were not found in plan file : "%s"' % (invalid_tables))
#NetBackup related functions
def restore_state_files_with_nbu(context):
restore_file_with_nbu(context, "ao")
restore_file_with_nbu(context, "co")
restore_file_with_nbu(context, "last_operation")
def restore_config_files_with_nbu(context):
restore_file_with_nbu(context, "master_config")
gparray = GpArray.initFromCatalog(dbconn.DbURL(port=context.master_port), utility=True)
segments = gparray.getSegmentList()
for segment in segments:
seg_config_filename = context.generate_filename("segment_config", dbid=segment.get_primary_dbid(), directory=segment.getSegmentDataDirectory())
seg_host = segment.get_active_primary().getSegmentHostName()
restore_file_with_nbu(context, path=seg_config_filename, hostname=seg_host)
def _build_gpdbrestore_cmd_line(context, ts, table_file):
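    """Build the gpdbrestore command line used to replay a single incremental
    timestamp against the given table filter file."""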
cmd = 'gpdbrestore -t %s --table-file %s -a -v --noplan --noanalyze --noaostats --no-validate-table-name' % (ts, table_file)
if context.backup_dir:
cmd += " -u %s" % context.backup_dir
if context.dump_prefix:
cmd += " --prefix=%s" % context.dump_prefix.strip('_')
if context.redirected_restore_db:
cmd += " --redirect=%s" % context.redirected_restore_db
if context.report_status_dir:
cmd += " --report-status-dir=%s" % context.report_status_dir
if context.ddboost:
cmd += " --ddboost"
if context.ddboost_storage_unit:
cmd += " --ddboost-storage-unit=%s" % context.ddboost_storage_unit
if context.netbackup_service_host:
cmd += " --netbackup-service-host=%s" % context.netbackup_service_host
if context.netbackup_block_size:
cmd += " --netbackup-block-size=%s" % context.netbackup_block_size
if context.change_schema:
cmd += " --change-schema=%s" % context.change_schema
return cmd
def truncate_restore_tables(context):
"""
    Truncate either specific tables or all tables under a schema
"""
try:
dburl = dbconn.DbURL(port=context.master_port, dbname=context.restore_db)
conn = dbconn.connect(dburl)
truncate_list = []
if context.restore_schemas:
for schemaname in context.restore_schemas:
                truncate_list.extend(RestoreDatabase(context).get_full_tables_in_schema(conn, schemaname))
else:
for restore_table in context.restore_tables:
schemaname, tablename = split_fqn(restore_table)
check_table_exists_qry = """SELECT EXISTS (
SELECT 1
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n on n.oid = c.relnamespace
WHERE n.nspname = '%s' and c.relname = '%s')""" % (pg.escape_string(schemaname),
pg.escape_string(tablename))
exists_result = execSQLForSingleton(conn, check_table_exists_qry)
if exists_result:
schema = escapeDoubleQuoteInSQLString(schemaname)
table = escapeDoubleQuoteInSQLString(tablename)
truncate_table = '%s.%s' % (schema, table)
truncate_list.append(truncate_table)
else:
logger.warning("Skipping truncate of %s.%s because the relation does not exist." % (context.restore_db, restore_table))
for t in truncate_list:
try:
qry = 'Truncate %s' % t
execSQL(conn, qry)
except Exception as e:
raise Exception("Could not truncate table %s.%s: %s" % (dbname, t, str(e).replace('\n', '')))
conn.commit()
except Exception as e:
raise Exception("Failure from truncating tables, %s" % (str(e).replace('\n', '')))
class RestoreDatabase(Operation):
def __init__(self, context):
self.context = context
def execute(self):
if self.context.redirected_restore_db:
self.context.restore_db = self.context.redirected_restore_db
if len(self.context.restore_tables) > 0 and self.context.truncate:
truncate_restore_tables(self.context)
if not self.context.ddboost:
ValidateSegments(self.context).run()
if self.context.redirected_restore_db and not self.context.drop_db:
self.create_database_if_not_exists()
self.create_gp_toolkit()
if self.context.restore_stats == "only":
self._restore_stats()
return
if self.context.drop_db:
self._multitry_createdb()
if self.context.restore_global:
self._restore_global(self.context)
if self.context.restore_global == "only":
return
"""
        For a full restore with table filter or for the first recursion of the incremental restore
        we first restore the schema, expand the parent partition table names in the restore table
filter to include leaf partition names, and then restore data (only, using '-a' option).
"""
full_restore_with_filter = False
full_restore = is_full_restore(self.context)
begin_incremental = (is_incremental_restore(self.context) and not self.context.no_plan)
table_filter_file = self.create_filter_file() # returns None if nothing to filter
change_schema_file = self.create_change_schema_file() # returns None if nothing to filter
schema_level_restore_file = self.create_schema_level_file()
if (full_restore and len(self.context.restore_tables) > 0 and not self.context.no_plan) or begin_incremental or self.context.metadata_only:
if full_restore and not self.context.no_plan:
full_restore_with_filter = True
restore_line = self.create_schema_only_restore_string(table_filter_file, full_restore_with_filter, change_schema_file, schema_level_restore_file)
logger.info("Running metadata restore")
logger.info("Invoking commandline: %s" % restore_line)
cmd = Command('Invoking gp_restore', restore_line)
cmd.run(validateAfter=False)
self._process_result(cmd)
logger.info("Expanding parent partitions if any in table filter")
self.context.restore_tables = expand_partition_tables(self.context.restore_db, self.context.restore_tables)
if begin_incremental:
logger.info("Running data restore")
self.restore_incremental_data_only()
else:
table_filter_file = self.create_filter_file()
if not self.context.metadata_only:
restore_line = self.create_restore_string(table_filter_file, full_restore_with_filter, change_schema_file, schema_level_restore_file)
logger.info('gp_restore commandline: %s: ' % restore_line)
cmd = Command('Invoking gp_restore', restore_line)
cmd.run(validateAfter=False)
self._process_result(cmd)
if full_restore_with_filter:
restore_line = self.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter, change_schema_file, schema_level_restore_file)
logger.info("Running post data restore")
logger.info('gp_restore commandline: %s: ' % restore_line)
cmd = Command('Invoking gp_restore', restore_line)
cmd.run(validateAfter=False)
self._process_result(cmd)
        restore_all = False
if not self.context.no_ao_stats:
logger.info("Updating AO/CO statistics on master")
# If we don't have a filter for table and schema, then we must be doing a full restore.
if len(self.context.restore_schemas) == 0 and len(self.context.restore_tables) == 0:
                restore_all = True
update_ao_statistics(self.context, self.context.restore_tables, self.context.restore_schemas, restore_all=restore_all)
if not self.context.metadata_only:
if (not self.context.no_analyze) and len(self.context.restore_tables) == 0:
self._analyze(self.context)
elif (not self.context.no_analyze) and len(self.context.restore_tables) > 0:
self._analyze_restore_tables()
if self.context.restore_stats:
self._restore_stats()
self.tmp_files = [table_filter_file, change_schema_file, schema_level_restore_file]
self.cleanup_files_on_segments()
def _process_result(self, cmd):
res = cmd.get_results()
if res.rc == 0:
logger.info("gpdbrestore finished successfully")
elif res.rc == 2:
logger.warn("gpdbrestore finished but ERRORS were found, please check the restore report file for details")
else:
raise Exception('gpdbrestore finished unsuccessfully')
def cleanup_files_on_segments(self):
for tmp_file in self.tmp_files:
if tmp_file and os.path.isfile(tmp_file):
os.remove(tmp_file)
remove_file_on_segments(self.context, tmp_file)
def _analyze(self, context):
conn = None
logger.info('Commencing analyze of %s database, please wait' % context.restore_db)
try:
dburl = dbconn.DbURL(port=context.master_port, dbname=context.restore_db)
conn = dbconn.connect(dburl)
execSQL(conn, 'analyze')
conn.commit()
        except Exception as e:
logger.warn('Issue with analyze of %s database' % context.restore_db)
else:
logger.info('Analyze of %s completed without error' % context.restore_db)
finally:
if conn:
conn.close()
def _analyze_restore_tables(self):
logger.info('Commencing analyze of restored tables in \'%s\' database, please wait' % self.context.restore_db)
batch_count = 0
try:
with dbconn.connect(dbconn.DbURL(port=self.context.master_port, dbname=self.context.restore_db)) as conn:
num_sqls = 0
analyze_list = []
# need to find out all tables under the schema and construct the new schema.table to analyze
if self.context.change_schema and self.context.restore_schemas:
schemaname = self.context.change_schema
analyze_list = self.get_full_tables_in_schema(conn, schemaname)
elif self.context.restore_schemas:
for schemaname in self.context.restore_schemas:
analyze_list.extend(self.get_full_tables_in_schema(conn, schemaname))
else:
for restore_table in self.context.restore_tables:
schemaname, tablename = split_fqn(restore_table)
if self.context.change_schema:
schema = escapeDoubleQuoteInSQLString(self.context.change_schema)
else:
schema = escapeDoubleQuoteInSQLString(schemaname)
table = escapeDoubleQuoteInSQLString(tablename)
restore_table = '%s.%s' % (schema, table)
analyze_list.append(restore_table)
for tbl in analyze_list:
analyze_table = "analyze " + tbl
try:
execSQL(conn, analyze_table)
except Exception as e:
                            raise Exception('Issue with \'ANALYZE\' of restored table \'%s\' in \'%s\' database' % (tbl, self.context.restore_db))
else:
num_sqls += 1
                        if num_sqls == 1000:  # The batch size of 1000 was chosen arbitrarily
                            batch_count += 1
                            logger.debug('Completed executing batch of 1000 ANALYZE statements')
conn.commit()
num_sqls = 0
except Exception as e:
logger.warn('Restore of \'%s\' database succeeded but \'ANALYZE\' of restored tables failed' % self.context.restore_db)
logger.warn('Please run ANALYZE manually on restored tables. Failure to run ANALYZE might result in poor database performance')
raise Exception(str(e))
else:
logger.info('\'Analyze\' of restored tables in \'%s\' database completed without error' % self.context.restore_db)
return batch_count
def get_full_tables_in_schema(self, conn, schemaname):
res = []
        get_all_tables_qry = 'select \'"\' || schemaname || \'"\', \'"\' || tablename || \'"\' from pg_tables where schemaname = \'%s\';' % pg.escape_string(schemaname)
relations = execSQL(conn, get_all_tables_qry)
for relation in relations:
schema, table = relation[0], relation[1]
schema = escapeDoubleQuoteInSQLString(schema)
table = escapeDoubleQuoteInSQLString(table)
restore_table = '%s.%s' % (schema, table)
res.append(restore_table)
return res
def create_schema_level_file(self):
if not self.context.restore_schemas:
return None
schema_level_restore_file = create_temp_file_with_schemas(list(self.context.restore_schemas))
addresses = get_all_segment_addresses(self.context.master_port)
scp_file_to_hosts(addresses, schema_level_restore_file, self.context.batch_default)
return schema_level_restore_file
def create_change_schema_file(self):
if not self.context.change_schema:
return None
schema_list = [self.context.change_schema]
change_schema_file = create_temp_file_with_schemas(schema_list)
addresses = get_all_segment_addresses(self.context.master_port)
scp_file_to_hosts(addresses, change_schema_file, self.context.batch_default)
return change_schema_file
def create_filter_file(self):
if not self.context.restore_tables or len(self.context.restore_tables) == 0:
return None
table_filter_file = create_temp_file_with_tables(self.context.restore_tables)
addresses = get_all_segment_addresses(self.context.master_port)
scp_file_to_hosts(addresses, table_filter_file, self.context.batch_default)
return table_filter_file
def restore_incremental_data_only(self):
restore_data = False
plan_file_items = get_plan_file_contents(self.context)
table_files = []
restored_tables = []
validate_restore_tables_list(plan_file_items, self.context.restore_tables, self.context.restore_schemas)
for (ts, table_list) in plan_file_items:
if table_list:
restore_data = True
table_file = get_restore_table_list(table_list.strip('\n').split(','), self.context.restore_tables)
if table_file is None:
continue
cmd = _build_gpdbrestore_cmd_line(self.context, ts, table_file)
logger.info('Invoking commandline: %s' % cmd)
Command('Invoking gpdbrestore', cmd).run(validateAfter=True)
table_files.append(table_file)
restored_tables.extend(get_restore_tables_from_table_file(table_file))
if not restore_data:
raise Exception('There were no tables to restore. Check the plan file contents for restore timestamp %s' % self.context.timestamp)
if not self.context.no_ao_stats:
logger.info("Updating AO/CO statistics on master")
update_ao_statistics(self.context, restored_tables)
else:
logger.info("noaostats enabled. Skipping update of AO/CO statistics on master.")
for table_file in table_files:
if table_file:
os.remove(table_file)
return True
def _restore_global(self, context):
logger.info('Commencing restore of global objects')
global_file = context.generate_filename("global")
if not os.path.exists(global_file):
raise Exception('Unable to locate global file %s in dump set' % (global_file))
Psql('Invoking global dump', filename=global_file).run(validateAfter=True)
def _restore_stats(self):
logger.info('Commencing restore of statistics')
stats_filename = self.context.generate_filename("stats", directory="/tmp")
stats_path = self.context.generate_filename("stats")
if not os.path.exists(stats_path):
            raise Exception('Unable to locate statistics file %s in dump set' % stats_path)
        # We need to replace existing starelids in the file to match the starelids of tables in the database in case they're different
# First, map each schemaname.tablename to its corresponding starelid
query = """SELECT t.schemaname || '.' || t.tablename, c.oid FROM pg_class c join pg_tables t ON c.relname = t.tablename
WHERE t.schemaname NOT IN ('pg_toast', 'pg_bitmapindex', 'pg_temp_1', 'pg_catalog', 'information_schema', 'gp_toolkit')"""
relids = {}
rows = execute_sql(query, self.context.master_port, self.context.restore_db)
for row in rows:
if len(row) != 2:
raise Exception("Invalid return from query: Expected 2 columns, got % columns" % (len(row)))
relids[row[0]] = str(row[1])
# Read in the statistics dump file, find each schemaname.tablename section, and replace the corresponding starelid
# This section is also where we filter out tables that are not in restore_tables
with open(stats_filename, "w") as outfile:
with open(stats_path, "r") as infile:
                table_pattern = compile(r"-- Schema: (\w+), Table: (\w+)")
print_toggle = True
starelid_toggle = False
new_oid = ""
for line in infile:
matches = search(table_pattern, line)
if matches:
tablename = '%s.%s' % (matches.group(1), matches.group(2))
if len(self.context.restore_tables) == 0 or tablename in self.context.restore_tables:
try:
new_oid = relids[tablename]
print_toggle = True
starelid_toggle = True
except KeyError as e:
if "Attribute" not in line: # Only print a warning once per table, at the tuple count restore section
logger.warning("Cannot restore statistics for table %s: Table does not exist. Skipping...", tablename)
print_toggle = False
starelid_toggle = False
else:
print_toggle = False
if starelid_toggle and "::oid" in line:
line = " %s::oid,\n" % new_oid
starelid_toggle = False
if print_toggle:
outfile.write(line)
Psql('Invoking statistics restore', filename=stats_filename, database=self.context.restore_db).run(validateAfter=True)
def _multitry_createdb(self):
        no_of_tries = 600
        for _ in range(no_of_tries):
try:
self._process_createdb()
except ExceptionNoStackTraceNeeded:
time.sleep(1)
else:
return
raise ExceptionNoStackTraceNeeded('Failed to drop database %s' % self.context.restore_db)
def drop_database_if_exists(self):
conn = None
try:
dburl = dbconn.DbURL(port=self.context.master_port, dbname='template1')
conn = dbconn.connect(dburl)
count = execSQLForSingleton(conn, "select count(*) from pg_database where datname='%s';" % pg.escape_string(self.context.restore_db))
logger.info("Dropping Database %s" % self.context.restore_db)
if count == 1:
cmd = Command(name='drop database %s' % self.context.restore_db,
cmdStr='dropdb %s -p %s' % (checkAndAddEnclosingDoubleQuote(shellEscape(self.context.restore_db)), self.context.master_port))
cmd.run(validateAfter=True)
logger.info("Dropped Database %s" % self.context.restore_db)
        except ExecutionError as e:
logger.exception("Could not drop database %s" % self.context.restore_db)
raise ExceptionNoStackTraceNeeded('Failed to drop database %s' % self.context.restore_db)
finally:
conn.close()
def create_database_if_not_exists(self):
conn = None
try:
dburl = dbconn.DbURL(port=self.context.master_port, dbname='template1')
conn = dbconn.connect(dburl)
count = execSQLForSingleton(conn, "select count(*) from pg_database where datname='%s';" % pg.escape_string(self.context.restore_db))
logger.info("Creating Database %s" % self.context.restore_db)
if count == 0:
cmd = Command(name='create database %s' % self.context.restore_db,
cmdStr='createdb %s -p %s -T template0' % (checkAndAddEnclosingDoubleQuote(shellEscape(self.context.restore_db)), self.context.master_port))
cmd.run(validateAfter=True)
logger.info("Created Database %s" % self.context.restore_db)
        except ExecutionError as e:
logger.exception("Could not create database %s" % self.context.restore_db)
raise ExceptionNoStackTraceNeeded('Failed to create database %s' % self.context.restore_db)
finally:
conn.close()
def check_gp_toolkit(self):
GP_TOOLKIT_QUERY = """SELECT count(*)
FROM pg_class pgc, pg_namespace pgn
WHERE pgc.relnamespace=pgn.oid AND
pgn.nspname='gp_toolkit'
"""
with dbconn.connect(dbconn.DbURL(dbname=self.context.restore_db, port=self.context.master_port)) as conn:
res = dbconn.execSQLForSingleton(conn, GP_TOOLKIT_QUERY)
if res == 0:
return False
return True
def create_gp_toolkit(self):
if not self.check_gp_toolkit():
if 'GPHOME' not in os.environ:
logger.warn('Please set $GPHOME in your environment')
logger.warn('Skipping creation of gp_toolkit since $GPHOME/share/postgresql/gp_toolkit.sql could not be found')
else:
logger.info('Creating gp_toolkit schema for database "%s"' % self.context.restore_db)
Psql(name='create gp_toolkit',
filename=os.path.join(os.environ['GPHOME'],
'share', 'postgresql',
'gp_toolkit.sql'),
database=self.context.restore_db).run(validateAfter=True)
def _process_createdb(self):
self.drop_database_if_exists()
if self.context.redirected_restore_db:
self.create_database_if_not_exists()
else:
createdb_file = self.context.generate_filename("cdatabase")
logger.info('Invoking sql file: %s' % createdb_file)
Psql('Invoking schema dump', filename=createdb_file).run(validateAfter=True)
self.create_gp_toolkit()
def backup_dir_is_writable(self):
if self.context.backup_dir and not self.context.report_status_dir:
try:
                directory = self.context.get_backup_dir()
check_dir_writable(directory)
except Exception as e:
logger.warning('Backup directory %s is not writable. Error %s' % (directory, str(e)))
logger.warning('Since --report-status-dir option is not specified, report and status file will be written in segment data directory.')
return False
return True
def create_restore_string(self, table_filter_file, full_restore_with_filter, change_schema_file=None, schema_level_restore_file=None):
user = getpass.getuser()
hostname = socket.gethostname() # TODO: can this just be localhost? bash was using `hostname`
path = self.context.get_gpd_path()
restore_line = "gp_restore -i -h %s -p %s -U %s --gp-i" % (hostname, self.context.master_port, user)
if self.context.dump_prefix:
logger.info("Adding --prefix")
restore_line += " --prefix=%s" % self.context.dump_prefix
restore_line += " --gp-k=%s --gp-l=p" % (self.context.timestamp)
restore_line += " --gp-d=%s" % path
if self.context.report_status_dir:
restore_line += " --gp-r=%s" % self.context.report_status_dir
restore_line += " --status=%s" % self.context.report_status_dir
        elif self.context.backup_dir and self.backup_dir_is_writable():
restore_line += " --gp-r=%s" % path
restore_line += " --status=%s" % path
# else
# gp-r is not set, restore.c sets it to MASTER_DATA_DIRECTORY if not specified.
# status file is not set, cdbbackup.c sets it to SEGMENT_DATA_DIRECTORY if not specified.
if table_filter_file:
restore_line += " --gp-f=%s" % table_filter_file
if self.context.compress:
restore_line += " --gp-c"
restore_line += " -d %s" % checkAndAddEnclosingDoubleQuote(shellEscape(self.context.restore_db))
# Restore only data if no_plan or full_restore_with_filter is True
if self.context.no_plan or full_restore_with_filter:
restore_line += " -a"
if self.context.no_ao_stats:
restore_line += " --gp-nostats"
if self.context.ddboost:
restore_line += " --ddboost"
if self.context.ddboost_storage_unit:
restore_line += " --ddboost-storage-unit=%s" % self.context.ddboost_storage_unit
if self.context.netbackup_service_host:
restore_line += " --netbackup-service-host=%s" % self.context.netbackup_service_host
if self.context.netbackup_block_size:
restore_line += " --netbackup-block-size=%s" % self.context.netbackup_block_size
if change_schema_file:
restore_line += " --change-schema-file=%s" % change_schema_file
if schema_level_restore_file:
restore_line += " --schema-level-file=%s" % schema_level_restore_file
return restore_line
def create_post_data_schema_only_restore_string(self, table_filter_file, full_restore_with_filter, change_schema_file=None, schema_level_restore_file=None):
user = getpass.getuser()
hostname = socket.gethostname() # TODO: can this just be localhost? bash was using `hostname`
path = self.context.get_gpd_path()
restore_line = "gp_restore -i -h %s -p %s -U %s --gp-d=%s --gp-i" % (hostname, self.context.master_port, user, path)
restore_line += " --gp-k=%s --gp-l=p" % (self.context.timestamp)
if full_restore_with_filter:
restore_line += " -P"
if self.context.report_status_dir:
restore_line += " --gp-r=%s" % self.context.report_status_dir
restore_line += " --status=%s" % self.context.report_status_dir
        elif self.context.backup_dir and self.backup_dir_is_writable():
restore_line += " --gp-r=%s" % path
restore_line += " --status=%s" % path
# else
# gp-r is not set, restore.c sets it to MASTER_DATA_DIRECTORY if not specified.
# status file is not set, cdbbackup.c sets it to SEGMENT_DATA_DIRECTORY if not specified.
if self.context.dump_prefix:
logger.info("Adding --prefix")
restore_line += " --prefix=%s" % self.context.dump_prefix
if table_filter_file:
restore_line += " --gp-f=%s" % table_filter_file
if change_schema_file:
restore_line += " --change-schema-file=%s" % change_schema_file
if schema_level_restore_file:
restore_line += " --schema-level-file=%s" % schema_level_restore_file
if self.context.compress:
restore_line += " --gp-c"
restore_line += " -d %s" % checkAndAddEnclosingDoubleQuote(shellEscape(self.context.restore_db))
if self.context.ddboost:
restore_line += " --ddboost"
if self.context.ddboost_storage_unit:
restore_line += " --ddboost-storage-unit=%s" % self.context.ddboost_storage_unit
if self.context.netbackup_service_host:
restore_line += " --netbackup-service-host=%s" % self.context.netbackup_service_host
if self.context.netbackup_block_size:
restore_line += " --netbackup-block-size=%s" % self.context.netbackup_block_size
return restore_line
def create_schema_only_restore_string(self, table_filter_file, full_restore_with_filter, change_schema_file=None, schema_level_restore_file=None):
metadata_filename = self.context.generate_filename("metadata")
user = getpass.getuser()
hostname = socket.gethostname() # TODO: can this just be localhost? bash was using `hostname`
(gpr_path, status_path, gpd_path) = self.get_restore_line_paths()
restore_line = "gp_restore -i -h %s -p %s -U %s --gp-i" % (hostname, self.context.master_port, user)
restore_line += " --gp-k=%s --gp-l=p -s %s" % (self.context.timestamp, metadata_filename)
if full_restore_with_filter:
restore_line += " -P"
if gpr_path and status_path:
restore_line += " --gp-r=%s" % gpr_path
restore_line += " --status=%s" % status_path
# else
# gp-r is not set, restore.c sets it to MASTER_DATA_DIRECTORY if not specified.
# status file is not set, cdbbackup.c sets it to SEGMENT_DATA_DIRECTORY if not specified.
restore_line += " --gp-d=%s" % gpd_path
if self.context.dump_prefix:
logger.info("Adding --prefix")
restore_line += " --prefix=%s" % self.context.dump_prefix
if table_filter_file:
restore_line += " --gp-f=%s" % table_filter_file
if self.context.compress:
restore_line += " --gp-c"
restore_line += " -d %s" % checkAndAddEnclosingDoubleQuote(shellEscape(self.context.restore_db))
if self.context.ddboost:
restore_line += " --ddboost"
if self.context.ddboost_storage_unit:
restore_line += " --ddboost-storage-unit=%s" % self.context.ddboost_storage_unit
if self.context.netbackup_service_host:
restore_line += " --netbackup-service-host=%s" % self.context.netbackup_service_host
if self.context.netbackup_block_size:
restore_line += " --netbackup-block-size=%s" % self.context.netbackup_block_size
if change_schema_file:
restore_line += " --change-schema-file=%s" % change_schema_file
if schema_level_restore_file:
restore_line += " --schema-level-file=%s" % schema_level_restore_file
return restore_line
def get_restore_line_paths(self):
(gpr_path, status_path, gpd_path) = (None, None, None)
gpd_path = self.context.get_gpd_path()
if self.context.report_status_dir:
gpr_path = self.context.report_status_dir
status_path = self.context.report_status_dir
        elif self.context.backup_dir and self.backup_dir_is_writable():
gpr_path = gpd_path
status_path = gpd_path
if self.context.ddboost:
gpd_path = "%s/%s" % (self.context.dump_dir, self.context.timestamp[0:8])
return (gpr_path, status_path, gpd_path)
class ValidateTimestamp(Operation):
def __init__(self, context):
self.context = context
def validate_metadata_file(self, compressed_file):
if self.context.netbackup_service_host:
logger.info('Backup for given timestamp was performed using NetBackup. Querying NetBackup server to check for the dump file.')
compress = check_file_dumped_with_nbu(self.context, path=compressed_file)
else:
compress = os.path.exists(compressed_file)
if not compress:
uncompressed_file = compressed_file[:compressed_file.index('.gz')]
if not os.path.exists(uncompressed_file):
raise ExceptionNoStackTraceNeeded('Unable to find {ucfile} or {ucfile}.gz. Skipping restore.'.format(ucfile=uncompressed_file))
return compress
def validate_timestamp_format(self):
if not self.context.timestamp:
raise Exception('Timestamp must not be None.')
else:
            # timestamp has to be a string of exactly 14 digits (YYYYMMDDHHMMSS)
            timestamp_pattern = compile(r'^\d{14}$')
            if not search(timestamp_pattern, self.context.timestamp):
raise Exception('Invalid timestamp specified, please specify in the following format: YYYYMMDDHHMMSS.')
def execute(self):
self.validate_timestamp_format()
createdb_file = self.context.generate_filename("cdatabase")
if not CheckFile(createdb_file).run():
raise ExceptionNoStackTraceNeeded("Dump file '%s' does not exist on Master" % createdb_file)
restore_db = GetDbName(createdb_file).run()
if not self.context.ddboost:
compressed_file = self.context.generate_filename("metadata")
compress = self.validate_metadata_file(compressed_file)
else:
compressed_file = self.context.generate_filename("postdata")
compress = CheckFile(compressed_file).run()
return (self.context.timestamp, restore_db, compress)
class ValidateSegments(Operation):
def __init__(self, context):
self.context = context
def execute(self):
""" TODO: Improve with grouping by host and ParallelOperation dispatch. """
gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.context.master_port, dbname='template1'), utility=True)
primaries = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
mdd = self.context.master_datadir
for seg in primaries:
if seg.isSegmentDown():
""" Why must every Segment function have the word Segment in it ?! """
raise ExceptionNoStackTraceNeeded("Host %s dir %s dbid %d marked as invalid" % (seg.getSegmentHostName(), seg.getSegmentDataDirectory(), seg.getSegmentDbId()))
self.context.master_datadir = seg.getSegmentDataDirectory()
if self.context.netbackup_service_host is None:
host = seg.getSegmentHostName()
path = self.context.generate_filename("dump", dbid=seg.getSegmentDbId())
exists = CheckRemotePath(path, host).run()
if not exists:
raise ExceptionNoStackTraceNeeded("No dump file on %s at %s" % (seg.getSegmentHostName(), path))
self.context.master_datadir = mdd
def check_table_name_format_and_duplicate(table_list, restore_schemas=None):
"""
verify table list, and schema list, resolve duplicates and overlaps
"""
restore_table_list = []
table_set = set()
# validate special characters
check_funny_chars_in_names(restore_schemas, is_full_qualified_name = False)
check_funny_chars_in_names(table_list)
# validate schemas
if restore_schemas:
restore_schemas = list(set(restore_schemas))
for restore_table in table_list:
if '.' not in restore_table:
raise Exception("No schema name supplied for %s, removing from list of tables to restore" % restore_table)
schema, table = split_fqn(restore_table)
# schema level restore will be handled before specific table restore, treat as duplicate
if not ((restore_schemas and schema in restore_schemas) or (schema, table) in table_set):
table_set.add((schema, table))
restore_table_list.append(restore_table)
return restore_table_list, restore_schemas
def validate_tablenames_exist_in_dump_file(restore_tables, dumped_tables):
unmatched_table_names = []
if dumped_tables:
dumped_table_names = [schema + '.' + table for (schema, table, _) in dumped_tables]
for table in restore_tables:
if table not in dumped_table_names:
unmatched_table_names.append(table)
else:
raise Exception('No dumped tables to restore.')
if len(unmatched_table_names) > 0:
raise Exception("Tables %s not found in backup" % unmatched_table_names)
class ValidateRestoreTables(Operation):
def __init__(self, context):
self.context = context
def execute(self):
existing_tables = []
table_counts = []
conn = None
try:
dburl = dbconn.DbURL(port=self.context.master_port, dbname=self.context.restore_db)
conn = dbconn.connect(dburl)
            for restore_table in self.context.restore_tables:
schema, table = split_fqn(restore_table)
count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'" % (table, schema))
if count == 0:
logger.warn("Table %s does not exist in database %s, removing from list of tables to restore" % (table, self.context.restore_db))
continue
count = execSQLForSingleton(conn, "select count(*) from %s.%s" % (schema, table))
if count > 0:
logger.warn('Table %s has %d records %s' % (restore_table, count, WARN_MARK))
existing_tables.append(restore_table)
table_counts.append((restore_table, count))
finally:
if conn:
conn.close()
if len(existing_tables) == 0:
raise ExceptionNoStackTraceNeeded("Have no tables to restore")
logger.info("Have %d tables to restore, will continue" % len(existing_tables))
return (existing_tables, table_counts)
class CopyPostData(Operation):
''' Copy _post_data when using fake timestamp.
The same operation can be done with/without ddboost, because
the _post_data file is always kept on the master, not on the dd server '''
def __init__(self, context, fake_timestamp):
self.fake_timestamp = fake_timestamp
self.context = context
def execute(self):
# Build master _post_data file:
real_post_data = self.context.generate_filename("postdata")
fake_post_data = self.context.generate_filename("postdata", self.fake_timestamp)
shutil.copy(real_post_data, fake_post_data)
class GetDbName(Operation):
def __init__(self, createdb_file):
self.createdb_file = createdb_file
def execute(self):
f = open(self.createdb_file, 'r')
# assumption: 'CREATE DATABASE' line will reside within the first 50 lines of the gp_cdatabase_1_1_* file
for _ in range(0, 50):
line = f.readline()
if not line:
break
if line.startswith("CREATE DATABASE"):
restore_db = get_dbname_from_cdatabaseline(line)
if restore_db is None:
raise Exception('Expected database name after CREATE DATABASE in line "%s" of file "%s"' % (line, self.createdb_file))
return removeEscapingDoubleQuoteInSQLString(checkAndRemoveEnclosingDoubleQuote(restore_db), forceDoubleQuote=False)
else:
raise GetDbName.DbNameGiveUp()
raise GetDbName.DbNameNotFound()
class DbNameNotFound(Exception): pass
class DbNameGiveUp(Exception): pass
class RecoverRemoteDumps(Operation):
def __init__(self, context, host, path):
self.host = host
self.path = path
self.context = context
def execute(self):
gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.context.master_port), utility=True)
from_host, from_path = self.host, self.path
logger.info("Commencing remote database dump file recovery process, please wait...")
segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True) or seg.isSegmentMaster()]
self.pool = WorkerPool(numWorkers=min(len(segs), self.context.batch_default))
for seg in segs:
to_host = seg.getSegmentHostName()
to_path = os.path.join(seg.getSegmentDataDirectory(), self.context.dump_dir, self.context.timestamp[0:8])
if seg.isSegmentMaster():
from_file = self.context.generate_filename("metadata")
to_file = self.context.generate_filename("metadata", directory=to_path)
else:
from_file = self.context.generate_filename("dump", dbid=seg.getSegmentDbId())
to_file = self.context.generate_filename("dump", dbid=seg.getSegmentDbId(), directory=to_path)
if not CheckRemoteDir(to_path, to_host).run():
logger.info('Creating directory %s on %s' % (to_path, to_host))
try:
MakeRemoteDir(to_path, to_host).run()
                except OSError as e:
raise ExceptionNoStackTraceNeeded("Failed to create directory %s on %s" % (to_path, to_host))
logger.info("Commencing remote copy from %s to %s:%s" % (from_host, to_host, to_path))
self.pool.addCommand(Scp('Copying dump for seg %d' % seg.getSegmentDbId(),
srcFile=from_file,
dstFile=to_file,
srcHost=from_host,
dstHost=to_host))
self.pool.addCommand(Scp('Copying schema dump',
srcHost=from_host,
srcFile=self.context.generate_filename("cdatabase", directory=from_path),
dstFile=self.context.generate_filename("cdatabase")))
self.pool.addCommand(Scp('Copying report file',
srcHost=from_host,
srcFile=self.context.generate_filename("report", directory=from_path),
dstFile=self.context.generate_filename("report")))
self.pool.addCommand(Scp('Copying post data schema dump',
srcHost=from_host,
srcFile=self.context.generate_filename("postdata", directory=from_path),
dstFile=self.context.generate_filename("postdata")))
if self.context.restore_global:
self.pool.addCommand(Scp("Copying global dump",
srcHost=from_host,
srcFile=self.context.generate_filename("global", directory=from_path),
dstFile=self.context.generate_filename("global")))
self.pool.join()
self.pool.check_results()
class GetDumpTablesOperation(Operation):
def __init__(self, context):
self.context = context
self.grep_cmdStr = ''' | grep -e "-- Name: " -e "^\W*START (" -e "^\W*PARTITION " -e "^\W*DEFAULT PARTITION " -e "^\W*SUBPARTITION " -e "^\W*DEFAULT SUBPARTITION "'''
self.gunzip_maybe = ' | gunzip' if self.context.compress else ''
def extract_dumped_tables(self, lines):
schema = ''
owner = ''
table = ''
ret = []
for line in lines:
if line.startswith("-- Name: "):
table, table_type, schema, owner = get_table_info(line)
if table_type in ["TABLE", "EXTERNAL TABLE"]:
ret.append((schema, table, owner))
else:
line = line.strip()
if (line.startswith("START (") or line.startswith("DEFAULT PARTITION ") or line.startswith("PARTITION ") or
line.startswith("SUBPARTITION ") or line.startswith("DEFAULT SUBPARTITION ")):
keyword = " WITH \(tablename=E"
# minus the length of keyword below as we escaped '(' with an extra back slash (\)
pos = get_nonquoted_keyword_index(line, keyword, "'", len(keyword) - 1)
if pos == -1:
keyword = " WITH \(tablename="
pos = get_nonquoted_keyword_index(line, keyword, "'", len(keyword) - 1)
if pos == -1:
continue
# len(keyword) plus one to not include the first single quote
table = line[pos + len(keyword) : line.rfind("'")]
# unescape table name to get the defined name in database
table = unescape_string(table)
ret.append((schema, table, owner))
return ret
class GetDDboostDumpTablesOperation(GetDumpTablesOperation):
def __init__(self, context):
self.context = context
super(GetDDboostDumpTablesOperation, self).__init__(context)
def execute(self):
ddboost_cmdStr = 'gpddboost --readFile --from-file=%s' % self.context.generate_filename("dump")
        if self.context.ddboost_storage_unit:
            ddboost_cmdStr += ' --ddboost-storage-unit=%s' % self.context.ddboost_storage_unit
cmdStr = ddboost_cmdStr + self.gunzip_maybe + self.grep_cmdStr
cmd = Command('DDBoost copy of master dump file', cmdStr)
cmd.run(validateAfter=True)
line_list = cmd.get_results().stdout.splitlines()
ret = self.extract_dumped_tables(line_list)
return ret
class GetNetBackupDumpTablesOperation(GetDumpTablesOperation):
def __init__(self, context):
self.context = context
super(GetNetBackupDumpTablesOperation, self).__init__(context)
def execute(self):
nbu_cmdStr = 'gp_bsa_restore_agent --netbackup-service-host %s --netbackup-filename %s' % (self.context.netbackup_service_host, self.context.generate_filename("dump"))
cmdStr = nbu_cmdStr + self.gunzip_maybe + self.grep_cmdStr
cmd = Command('NetBackup copy of master dump file', cmdStr)
cmd.run(validateAfter=True)
line_list = cmd.get_results().stdout.splitlines()
ret = self.extract_dumped_tables(line_list)
return ret
class GetLocalDumpTablesOperation(GetDumpTablesOperation):
def __init__(self, context):
self.context = context
super(GetLocalDumpTablesOperation, self).__init__(context)
def execute(self):
f = None
try:
dump_file = self.context.generate_filename("dump")
if self.context.compress:
f = gzip.open(dump_file, 'r')
else:
f = open(dump_file, 'r')
lines = f.readlines()
ret = self.extract_dumped_tables(lines)
return ret
finally:
if f:
f.close()
class GetRemoteDumpTablesOperation(GetDumpTablesOperation):
def __init__(self, context, remote_host):
self.context = context
self.host = remote_host
super(GetRemoteDumpTablesOperation, self).__init__(context)
def execute(self):
cat_cmdStr = 'cat %s' % self.context.generate_filename("dump")
get_remote_dump_tables = '''ssh %s %s%s''' % (self.host, cat_cmdStr, self.grep_cmdStr)
cmd = Command('Get remote copy of dumped tables', get_remote_dump_tables)
cmd.run(validateAfter=True)
line_list = cmd.get_results().stdout.splitlines()
return self.extract_dumped_tables(line_list)
class GetDumpTables():
def __init__(self, context, remote_host=None):
self.context = context
self.remote_hostname = remote_host
def get_dump_tables(self):
if self.context.ddboost:
get_dump_table_cmd = GetDDboostDumpTablesOperation(self.context)
elif self.context.netbackup_service_host:
get_dump_table_cmd = GetNetBackupDumpTablesOperation(self.context)
elif self.remote_hostname:
get_dump_table_cmd = GetRemoteDumpTablesOperation(self.context, self.remote_hostname)
else:
get_dump_table_cmd = GetLocalDumpTablesOperation(self.context)
return get_dump_table_cmd.run()
|
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates
from matplotlib import patches
import os
import pickle
from datetime import datetime
import time
from pprint import pprint
import sys
from datetime import timedelta
from netCDF4 import Dataset
import numpy.ma as ma
from mpl_toolkits.basemap import Basemap, shiftgrid
import Etopo1map
import laplaceFilter
import mpl_util
from matplotlib import cm
am_start_time = 3
am_end_time = 6
pm_start_time = 15
pm_end_time = 18
#months_of_interest = [6,7,8,9]
months_of_interest = [1,2,3,4,5,6,7,8,9,10,11,12]
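# Note: with all 12 months selected, the seasonal average lists (index 8/9 of each station
# entry) span the whole year; restrict months_of_interest (e.g. [6,7,8,9]) for a seasonal subset.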
stations_to_use = [
'SAN BERNARDINO INTERNATIONAL A' ,
'SAN BERNARDINO INTL' ,
'RIVERSIDE MUNICIPAL ARPT' ,
'RIVERSIDE MUNI' ,
'DOWNTOWN L.A./USC CAMPUS' ,
'LONG BEACH / DAUGHERTY FIELD /' ,
'FULLERTON MUNICIPAL ARPT' ,
'FULLERTON MUNICIPAL' ,
]
#location: USAF, country, lat, long, elev, AM RH data {date: [RH]}, PM RH data {date: [RH]}, UTC offset (h), AM RH avgs, PM RH avgs, plot color
stations = {
'SAN BERNARDINO INTERNATIONAL A': [722866,'UNITED STATES', 34.095, -117.235, 0353.3,{},{},-8,[],[],'red'],
'SAN BERNARDINO INTL' : [722866,'UNITED STATES', 34.095, -117.235, 0353.3,{},{},-8,[],[],'red'],
'RIVERSIDE MUNICIPAL ARPT' : [722869,'UNITED STATES', 33.952, -117.439, 0245.2,{},{},-8,[],[],'blue'],
'RIVERSIDE MUNI' : [722869,'UNITED STATES', 33.950, -117.433, 0249.0,{},{},-8,[],[],'blue'],
'DOWNTOWN L.A./USC CAMPUS' : [722874,'UNITED STATES', 34.024, -118.291, 0054.6,{},{},-8,[],[],'green'],
'BRACKETT FIELD AIRPORT' : [722887,'UNITED STATES', 34.100, -117.783, 0308.2,{},{},-8,[],[],'red'],
'BRACKETT FLD' : [722887,'UNITED STATES', 34.083, -117.783, 0308.0,{},{},-8,[],[],'red'],
'LONG BEACH / DAUGHERTY FIELD /': [722970,'UNITED STATES', 33.812, -118.146, 0009.5,{},{},-8,[],[],'magenta'],
'FULLERTON MUNICIPAL ARPT' : [722976,'UNITED STATES', 33.872, -117.979, 0029.3,{},{},-8,[],[],'cyan'],
'FULLERTON MUNICIPAL' : [722976,'UNITED STATES', 33.867, -117.983, 0029.0,{},{},-8,[],[],'cyan'],
}
file = 'C:/Users/Sarah Hanna/Documents/Data/Toluene SOM/NOAA NCDC RH Data/309596598657dat.txt'
with open(file, 'r') as f:
f.readline()
f.readline()
for line in f:
newline = line.split(',')
stn = newline[0].rstrip()
day = newline[3]
hour = newline[4] # in UTC
temp_qc = newline[9]
dewp_qc = newline[11]
        temp = float(newline[8])
        RHx = float(newline[12])
        date = datetime.strptime(day, '%Y%m%d')
        if stn in stations:
            timezone = stations[stn][7]
            datetime_local = datetime.strptime(day + ' ' + hour, '%Y%m%d %H%M') + timedelta(hours = timezone)
#data QC
if temp_qc in ['1','5'] and dewp_qc in ['1','5']: #data has 'Passed all quality control checks'
am_data = stations[stn][5]
pm_data = stations[stn][6]
#get am data
if datetime_local.hour >= am_start_time and datetime_local.hour < am_end_time:
if date in am_data:
am_data[date].append(RHx)
else:
am_data[date] = [RHx]
#get pm data
if datetime_local.hour >= pm_start_time and datetime_local.hour < pm_end_time:
if date in pm_data:
pm_data[date].append(RHx)
else:
pm_data[date] = [RHx]
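# At this point each station's AM/PM dicts hold only QC-passed readings ('1' or '5' flags)
# grouped by date within the local-time windows, e.g. a 04:53 observation joins that day's
# AM list and a 16:53 observation that day's PM list.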
for stn in stations:
print '\n'
print stn
am_data = stations[stn][5]
pm_data = stations[stn][6]
if stn == 'SAN BERNARDINO INTERNATIONAL A':
summer_am_avgs = stations['SAN BERNARDINO INTL'][8]
summer_pm_avgs = stations['SAN BERNARDINO INTL'][9]
elif stn == 'RIVERSIDE MUNICIPAL ARPT':
summer_am_avgs = stations['RIVERSIDE MUNI'][8]
summer_pm_avgs = stations['RIVERSIDE MUNI'][9]
elif stn == 'BRACKETT FIELD AIRPORT':
summer_am_avgs = stations['BRACKETT FLD'][8]
summer_pm_avgs = stations['BRACKETT FLD'][9]
elif stn == 'FULLERTON MUNICIPAL ARPT':
summer_am_avgs = stations['FULLERTON MUNICIPAL'][8]
summer_pm_avgs = stations['FULLERTON MUNICIPAL'][9]
else:
summer_am_avgs = stations[stn][8]
summer_pm_avgs = stations[stn][9]
night_count = 0
day_count = 0
for date, RHs in pm_data.iteritems():
pm_avg_RH = np.mean(RHs)
night_count +=1
if date.month in months_of_interest:
summer_pm_avgs.append(pm_avg_RH)
for date, RHs in am_data.iteritems():
am_avg_RH = np.mean(RHs)
day_count +=1
if date.month in months_of_interest:
summer_am_avgs.append(am_avg_RH)
print 'am data points', len(summer_am_avgs)
print 'am median', np.median(summer_am_avgs)
print 'pm data points', len(summer_pm_avgs)
print 'pm median', np.median(summer_pm_avgs)
#Plotting
fig1 = plt.figure(figsize=(12,12))
bins_to_use = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100]
label_x_pos = 0.05
label_y_pos = 0.85
see_Y_axis = True
y_upper_lim = 850
ax1w = plt.subplot2grid((5,4), (0,0), colspan=1)
ax1d = plt.subplot2grid((5,4), (0,1), colspan=1)
ax2w = plt.subplot2grid((5,4), (1,0), colspan=1)
ax2d = plt.subplot2grid((5,4), (1,1), colspan=1)
ax3w = plt.subplot2grid((5,4), (2,0), colspan=1)
ax3d = plt.subplot2grid((5,4), (2,1), colspan=1)
ax5w = plt.subplot2grid((5,4), (3,0), colspan=1)
ax5d = plt.subplot2grid((5,4), (3,1), colspan=1)
ax6w = plt.subplot2grid((5,4), (4,0), colspan=1)
ax6d = plt.subplot2grid((5,4), (4,1), colspan=1)
n,bins,patches = ax1w.hist(stations[stations_to_use[1]][8],bins_to_use, color=stations[stations_to_use[1]][10])
ax1w.yaxis.set_visible(see_Y_axis)
ax1w.text(label_x_pos, label_y_pos,'SAN BERNARDINO', transform=ax1w.transAxes)
ax1w.set_xlim(0,100)
ax1w.xaxis.set_label_position('top')
ax1w.set_xlabel('AM')
ax1w.set_ylim(0,y_upper_lim)
ax1w.xaxis.set_ticklabels([])
ax1w.xaxis.grid()
n,bins,patches = ax1d.hist(stations[stations_to_use[1]][9],bins_to_use, color=stations[stations_to_use[1]][10])
ax1d.yaxis.set_visible(see_Y_axis)
ax1d.set_xlim(0,100)
ax1d.xaxis.set_label_position('top')
ax1d.set_xlabel('PM')
ax1d.set_ylim(0,y_upper_lim)
ax1d.yaxis.set_visible(False)
ax1d.xaxis.set_ticklabels([])
ax1d.xaxis.grid()
n,bins,patches = ax2w.hist(stations[stations_to_use[3]][8],bins_to_use, color=stations[stations_to_use[3]][10])
ax2w.yaxis.set_visible(see_Y_axis)
ax2w.text(label_x_pos, label_y_pos,'RIVERSIDE', transform=ax2w.transAxes)
ax2w.set_xlim(0,100)
ax2w.set_ylim(0,y_upper_lim)
ax2w.xaxis.tick_top()
ax2w.xaxis.set_ticklabels([])
ax2w.xaxis.grid()
n,bins,patches = ax2d.hist(stations[stations_to_use[3]][9],bins_to_use, color=stations[stations_to_use[3]][10])
ax2d.xaxis.set_ticklabels([])
ax2d.yaxis.set_visible(see_Y_axis)
ax2d.set_xlim(0,100)
ax2d.set_ylim(0,y_upper_lim)
ax2d.yaxis.set_visible(False)
ax2d.xaxis.grid()
n,bins,patches = ax3w.hist(stations[stations_to_use[4]][8],bins_to_use, color=stations[stations_to_use[4]][10])
ax3w.yaxis.set_visible(see_Y_axis)
ax3w.text(label_x_pos, 0.75,'DOWNTOWN L.A.'+'\n'+'/USC CAMPUS', transform=ax3w.transAxes)
ax3w.set_xlim(0,100)
ax3w.set_ylim(0,y_upper_lim)
ax3w.xaxis.tick_top()
ax3w.xaxis.set_ticklabels([])
ax3w.xaxis.grid()
n,bins,patches = ax3d.hist(stations[stations_to_use[4]][9],bins_to_use, color=stations[stations_to_use[4]][10])
ax3d.xaxis.set_ticklabels([])
ax3d.yaxis.set_visible(see_Y_axis)
ax3d.set_xlim(0,100)
ax3d.set_ylim(0,y_upper_lim)
ax3d.yaxis.set_visible(False)
ax3d.xaxis.grid()
n,bins,patches = ax5w.hist(stations[stations_to_use[5]][8],bins_to_use, color=stations[stations_to_use[5]][10])
ax5w.yaxis.set_visible(see_Y_axis)
ax5w.text(label_x_pos, label_y_pos,'LONG BEACH', transform=ax5w.transAxes)
ax5w.set_xlim(0,100)
ax5w.set_ylim(0,y_upper_lim)
ax5w.xaxis.grid()
ax5w.xaxis.set_ticklabels([])
n,bins,patches = ax5d.hist(stations[stations_to_use[5]][9],bins_to_use, color=stations[stations_to_use[5]][10])
ax5d.yaxis.set_visible(see_Y_axis)
ax5d.set_xlabel('%RH')
ax5d.set_xlim(0,100)
ax5d.set_ylim(0,y_upper_lim)
ax5d.xaxis.set_ticklabels([])
ax5d.yaxis.set_visible(False)
ax5d.xaxis.grid()
n,bins,patches = ax6w.hist(stations[stations_to_use[7]][8],bins_to_use, color=stations[stations_to_use[7]][10])
ax6w.yaxis.set_visible(see_Y_axis)
ax6w.text(label_x_pos, label_y_pos,'FULLERTON', transform=ax6w.transAxes)
ax6w.set_xlim(0,100)
ax6w.set_ylim(0,y_upper_lim)
ax6w.set_xlabel('%RH')
ax6w.xaxis.grid()
n,bins,patches = ax6d.hist(stations[stations_to_use[7]][9],bins_to_use, color=stations[stations_to_use[7]][10])
ax6d.yaxis.set_visible(see_Y_axis)
ax6d.set_xlabel('%RH')
ax6d.set_xlim(0,100)
ax6d.set_ylim(0,y_upper_lim)
ax6d.yaxis.set_visible(False)
ax6d.xaxis.grid()
#####map
ax9 = plt.subplot2grid((5,4), (0,2), colspan=2, rowspan=5)
latStart = 32
latEnd = 36
lonStart = -119.5
lonEnd = -116.5
#Get the ETOPO1 data#
etopo1name='C:/Users/Sarah Hanna/Documents/Data/Toluene SOM/etopo/ETOPO1_Ice_g_gmt4.grd'
etopo1 = Dataset(etopo1name,'r')
#print type(etopo1.variables)
#for item, value in etopo1.variables.iteritems():
# print item, value
lons = etopo1.variables['x'][:]
lats = etopo1.variables['y'][:]
res = Etopo1map.findSubsetIndices(latStart-5,latEnd+5,lonStart-40,lonEnd+10,lats,lons)
lon,lat=np.meshgrid(lons[res[0]:res[1]],lats[res[2]:res[3]])
#topo_bathy = etopo1.variables['z'][int(res[2]):int(res[3]),int(res[0]):int(res[1])] #z is in meters
#topo_bathySmoothed = laplaceFilter.laplace_filter(topo_bathy,M=None)
topo_bathySmoothed = etopo1.variables['z'][int(res[2]):int(res[3]),int(res[0]):int(res[1])] #z is in meters
print np.min(topo_bathySmoothed),np.max(topo_bathySmoothed)
levels_list = []
for x in range(0,2200,200):
levels_list.append(x)
if lonStart< 0 and lonEnd < 0:
lon_0= - (abs(lonEnd)+abs(lonStart))/2.0
else:
lon_0=(abs(lonEnd)+abs(lonStart))/2.0
print 'Center longitude ',lon_0
map = Basemap(llcrnrlat=latStart,urcrnrlat=latEnd,\
llcrnrlon=lonStart,urcrnrlon=lonEnd,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',area_thresh=100.,projection='lcc',\
lat_1=latStart,lon_0=lon_0)
x, y = map(lon,lat)
map.drawmapboundary(fill_color='#81BEF7')
#map.drawcoastlines()
map.drawcountries()
#map.drawstates()
#map.drawrivers()
#map.fillcontinents(color='white',zorder=0)
map.drawmeridians(np.arange(lons.min(),lons.max(),1),labels=[0,0,0,1])
map.drawparallels(np.arange(lats.min(),lats.max(),1),labels=[1,0,0,0])
CS1 = map.contourf(x,y,topo_bathySmoothed,levels_list,
cmap=mpl_util.LevelColormap(levels_list,cmap=cm.Greens),
extend='max',
alpha=1.,
origin='lower')
CS1.axis='tight'
cbar = plt.colorbar(CS1, orientation = 'horizontal', use_gridspec = True)
cbar.set_label('Altitude (m asl)')
cbar.set_ticklabels([0 ,400,1000, 1500,3000], update_ticks=True)
i=0
stn_lons = []
stn_lats = []
for key, value in stations.iteritems():
if key in stations_to_use:
stn_lons.append(value[3])
stn_lats.append(value[2])
#if i==0:
# xn,yn = map(value[3], value[2]+0.04)
# plt.text(xn,yn, 'Benito Juarez Intl')
#
#if i==1:
# xn,yn = map(value[3]-0.75, value[2]-0.1)
# plt.text(xn,yn, 'Mexico (Central) D.F.')
x,y = map(value[3], value[2])
map.plot(x,y, color=stations[key][10], marker='o', linestyle = 'None', markersize = 8)
i+=1
#####high res shapefiles for presentation purposes
road_info = map.readshapefile('C:/Users/Sarah Hanna/Documents/Data/Toluene SOM/shp/NA_road_atlas/road00l_shp/road_l','road',drawbounds=True) #GADM Data http://www.gadm.org/country
urbanarea_info = map.readshapefile('C:/Users/Sarah Hanna/Documents/Data/Toluene SOM/shp/Cal_2010_adjusted_urban_area/2010_adjusted_urban_area','urban',drawbounds=True) #US census bureau
USA_info = map.readshapefile('C:/Users/Sarah Hanna/Documents/Data/Toluene SOM/shp/US_coast/coastl_usa','USA',drawbounds=True) #GADM Data http://www.gadm.org/country
for nshape,seg in enumerate(map.road):
xx,yy = zip(*seg)
color = '#404040'
plt.plot(xx,yy,color=color)
for nshape,seg in enumerate(map.urban):
xx,yy = zip(*seg)
plt.fill(xx,yy,color = 'gray', alpha = 0.3, zorder=1)
for nshape,seg in enumerate(map.USA):
xx,yy = zip(*seg)
plt.plot(xx,yy,color = 'k', zorder=1)
#showing
map.drawmapscale(lonStart+0.75, latStart+0.5, lon_0, latStart, 100, barstyle='simple', units='km', fontsize=9, yoffset=None, labelstyle='simple', fontcolor='k', fillcolor1='w', fillcolor2='k', ax=None, format='%d', zorder=None)
plt.subplots_adjust(hspace=0.15)
plt.savefig('C:/Users/Sarah Hanna/Documents/Data/Toluene SOM/NOAA NCDC RH Data/LA_area_RH.png', bbox_inches='tight')
plt.show()
|
|
__description__ = \
"""
Control an automatic door via an arduino interface. The arduino is expected to
have an "open" sensor, a "closed" sensor, and an ambient light sensor.
"""
__author__ = "Michael J. Harms"
__date__ = "2017-09-23"
import PyCmdMessenger
import serial
import os, time, logging
class DoorException(Exception):
"""
General error class for this module.
"""
def __init__(self,*args,**kwargs):
logging.warning(args[0])
super().__init__(*args,**kwargs)
class Door:
"""
Automatic door, controlled by arduino.
"""
def __init__(self,
device_name,
device_tty=None,
door_move_time=5):
"""
device_name: name that the device will return if pinged by "who_are_you"
arduino sketch
device_tty: serial device for arduino. if None, look for the device under
/dev/ttyA* devices
door_move_time: time to wait for door before checking on it (s)
"""
self._COMMANDS = (("who_are_you",""),
("query",""),
("open_door",""),
("close_door",""),
("who_are_you_return","s"),
("query_return","iii"),
("door_open_return","i"),
("door_close_return","i"),
("communication_error","s"))
self._BAUD_RATE = 9600
self._device_name = device_name
self._device_tty = device_tty
self._door_move_time = door_move_time
self._last_check = -1
# Status that indicates whether the software device has actually found
# any hardware.
self._hardware_is_found = False
# Try to connect to specified device
if self._device_tty is not None:
try:
                self._arduino_raw_serial = PyCmdMessenger.ArduinoBoard(self._device_tty,
baud_rate=self._BAUD_RATE)
self._arduino_msg = PyCmdMessenger.CmdMessenger(self._arduino_raw_serial,
self._COMMANDS)
self._hardware_is_found = True
except:
pass
# Or look for the device
else:
self._find_serial()
if not self._hardware_is_found:
err = "Could not connect to door hardware.\n"
raise DoorException(err)
# Find current state of the system
self._query()
def _find_serial(self):
"""
Search through attached serial devices until one reports the specified
self._device_name when probed by "who_are_you".
"""
# if there is already a serial connection, move on
if self._hardware_is_found:
return
tty_devices = [d for d in os.listdir("/dev") if d.startswith("ttyA")]
for d in tty_devices:
try:
tmp_tty = os.path.join("/dev",d)
a = PyCmdMessenger.ArduinoBoard(tmp_tty,self._BAUD_RATE)
cmd = PyCmdMessenger.CmdMessenger(a,self._COMMANDS)
cmd.send("who_are_you")
reply = cmd.receive()
                if reply is not None:
if reply[0] == "who_are_you_return":
if reply[1][0] == self._device_name:
self._arduino_raw_serial = a
self._arduino_msg = cmd
self._device_tty = tmp_tty
self._hardware_is_found = True
break
# something went wrong ... not a device we can use.
except IndexError:
pass
def _query(self):
"""
Return sensor status.
"""
# Query arduino
self._arduino_msg.send("query")
result = self._arduino_msg.receive()
if result[0] != "query_return":
err = "door query failed.\n"
raise DoorException(err)
# Parse result
self._ambient_light = result[1][0]
self._open_sensor = result[1][1]
self._closed_sensor = result[1][2]
# interpret result
if self._open_sensor < 200 and self._closed_sensor > 900:
self._door_state = "open"
elif self._open_sensor > 900 and self._closed_sensor < 200:
self._door_state = "closed"
else:
self._door_state = "unknown"
self._last_check = time.time()
def open_door(self):
"""
Open the door.
"""
# send open door command
self._arduino_msg.send("open_door")
time.sleep(self._door_move_time)
# send status check and make sure it's open
self._query()
if self._door_state != "open":
err = "Door is reading {} after open request.\n".format(self._door_state)
raise DoorException(err)
def close_door(self):
"""
Close the door.
"""
# send close door command
self._arduino_msg.send("close_door")
time.sleep(self._door_move_time)
        # send status check and make sure it's closed
self._query()
if self._door_state != "closed":
err = "Door is reading {} after close request.\n".format(self._door_state)
raise DoorException(err)
@property
def comm_state(self):
"""
Return tuple. First entry is True/False (indicating good comms or not).
Second entry is string describing current state.
"""
if not self._hardware_is_found:
return False, "No hardware found."
try:
self._arduino_msg.send("who_are_you")
result = self._arduino_msg.receive()
if result[0] != "who_are_you_return" or result[1][0] != self._device_name:
err = "device name has changed to {}\n".format(result[1][0])
return False, err
except serial.serialutil.SerialException:
err = "serial connection lost\n"
return False, err
return True, "Connected."
@property
def door_state(self):
"""
Check current status and return "open", "closed", "unknown".
"""
self._query()
return self._door_state
@property
def ambient_light(self):
"""
Return current light level.
"""
self._query()
return self._ambient_light
@property
def open_sensor(self):
"""
Return current reading from open sensor.
"""
self._query()
return self._open_sensor
@property
def closed_sensor(self):
"""
Return current reading from closed sensor.
"""
self._query()
return self._closed_sensor
@property
def last_check(self):
"""
Return last time system was checked.
"""
return self._last_check
@property
def web_content(self):
"""
"""
out = []
out.append('<div class="well">')
#out.append('<div class="col-xs-6 col-m-3")
#current_state = self.door_date
#img_file = os.path.join("img","door_{}.png".format(current_state))
#out.append('<img class="img-responsive" src="{}" />'.format(img_file))
|
|
import re
from lib import BaseTest
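# Note (assumed harness behaviour): BaseTest runs runCmd, compares output against the named
# gold files via check_output()/check_cmd_output(), and honours attributes such as
# expectedCode, fixtureGpg, fixtureCmds and configOverride.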
class CreateMirror1Test(BaseTest):
"""
create mirror: all architectures + all components
"""
runCmd = "aptly mirror create --ignore-signatures mirror1 http://mirror.yandex.ru/debian/ wheezy"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror1", "mirror_show")
class CreateMirror2Test(BaseTest):
"""
create mirror: all architectures and 1 component
"""
runCmd = "aptly mirror create --ignore-signatures mirror2 http://mirror.yandex.ru/debian/ wheezy main"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror2", "mirror_show")
class CreateMirror3Test(BaseTest):
"""
create mirror: some architectures and 2 components
"""
runCmd = "aptly -architectures=i386,amd64 mirror create --ignore-signatures mirror3 http://mirror.yandex.ru/debian/ wheezy main contrib"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror3", "mirror_show")
class CreateMirror4Test(BaseTest):
"""
create mirror: missing component
"""
expectedCode = 1
runCmd = "aptly -architectures=i386,amd64 mirror create --ignore-signatures mirror4 http://mirror.yandex.ru/debian/ wheezy life"
class CreateMirror5Test(BaseTest):
"""
create mirror: missing architecture
"""
expectedCode = 1
runCmd = "aptly -architectures=i386,nano68 mirror create --ignore-signatures mirror5 http://mirror.yandex.ru/debian/ wheezy"
class CreateMirror6Test(BaseTest):
"""
create mirror: missing release
"""
expectedCode = 1
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror6 http://mirror.yandex.ru/debian/ suslik"
class CreateMirror7Test(BaseTest):
"""
create mirror: architectures fixed via config file
"""
runCmd = "aptly mirror create --ignore-signatures mirror7 http://mirror.yandex.ru/debian/ wheezy main contrib"
configOverride = {"architectures": ["i386", "amd64"]}
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror7", "mirror_show")
class CreateMirror8Test(BaseTest):
"""
create mirror: already exists
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures mirror8 http://mirror.yandex.ru/debian/ wheezy main contrib"
]
runCmd = "aptly mirror create --ignore-signatures mirror8 http://mirror.yandex.ru/debian/ wheezy main contrib"
expectedCode = 1
class CreateMirror9Test(BaseTest):
"""
create mirror: repo with InRelease verification
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror9 http://mirror.yandex.ru/debian-backports/ squeeze-backports"
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using|Warning: using insecure memory!\n', '', s)
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror9", "mirror_show", match_prepare=removeDates)
class CreateMirror10Test(BaseTest):
"""
create mirror: repo with InRelease verification, failure
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror10 http://mirror.yandex.ru/debian-backports/ squeeze-backports"
fixtureGpg = False
gold_processor = BaseTest.expand_environ
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using|gpgv: keyblock resource .*$|gpgv: Can\'t check signature: .*$', '', s, flags=re.MULTILINE)
expectedCode = 1
class CreateMirror11Test(BaseTest):
"""
create mirror: repo with Release + Release.gpg verification
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror11 http://mirror.yandex.ru/debian/ squeeze"
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror11", "mirror_show")
class CreateMirror12Test(BaseTest):
"""
create mirror: repo with Release+Release.gpg verification, failure
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror12 http://mirror.yandex.ru/debian/ squeeze"
fixtureGpg = False
gold_processor = BaseTest.expand_environ
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using|gpgv: keyblock resource .*$|gpgv: Can\'t check signature: .*$', '', s, flags=re.MULTILINE)
expectedCode = 1
class CreateMirror13Test(BaseTest):
"""
create mirror: skip verification using config file
"""
runCmd = "aptly mirror create mirror13 http://mirror.yandex.ru/debian/ wheezy"
configOverride = {"gpgDisableVerify": True}
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror13", "mirror_show")
class CreateMirror14Test(BaseTest):
"""
create mirror: flat repository
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror14 http://download.opensuse.org/repositories/home:/monkeyiq/Debian_7.0/ ./"
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror14", "mirror_show")
class CreateMirror15Test(BaseTest):
"""
create mirror: flat repository + components
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror14 http://download.opensuse.org/repositories/home:/monkeyiq/Debian_7.0/ ./ main"
expectedCode = 1
class CreateMirror16Test(BaseTest):
"""
create mirror: there's no "source" architecture
"""
expectedCode = 1
runCmd = "aptly -architectures=source mirror create -ignore-signatures mirror16 http://mirror.yandex.ru/debian/ wheezy"
class CreateMirror17Test(BaseTest):
"""
create mirror: mirror with sources enabled
"""
runCmd = "aptly -architectures=i386 mirror create -ignore-signatures -with-sources mirror17 http://mirror.yandex.ru/debian/ wheezy"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror17", "mirror_show")
class CreateMirror18Test(BaseTest):
"""
create mirror: mirror with ppa URL
"""
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
configOverride = {
"ppaDistributorID": "ubuntu",
"ppaCodename": "maverick",
}
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror18 ppa:gladky-anton/gnuplot"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror18", "mirror_show")
class CreateMirror19Test(BaseTest):
"""
create mirror: mirror with / in distribution
"""
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
runCmd = "aptly -architectures='i386' mirror create -keyring=aptlytest.gpg -with-sources mirror19 http://security.debian.org/ wheezy/updates main"
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror19", "mirror_show", match_prepare=removeDates)
class CreateMirror20Test(BaseTest):
"""
create mirror: using failing HTTP_PROXY
"""
fixtureGpg = True
runCmd = "aptly -architectures='i386' mirror create -keyring=aptlytest.gpg -with-sources mirror20 http://security.debian.org/ wheezy/updates main"
environmentOverride = {"HTTP_PROXY": "127.0.0.1:3137"}
expectedCode = 1
class CreateMirror21Test(BaseTest):
"""
create mirror: flat repository in subdir
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror21 http://pkg.jenkins-ci.org/debian-stable binary/"
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def check(self):
def removeSHA512(s):
return re.sub(r"SHA512: .+\n", "", s)
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror21", "mirror_show", match_prepare=lambda s: removeSHA512(removeDates(s)))
class CreateMirror22Test(BaseTest):
"""
create mirror: mirror with filter
"""
runCmd = "aptly mirror create -ignore-signatures -filter='nginx | Priority (required)' mirror22 http://security.debian.org/ wheezy/updates main"
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror22", "mirror_show", match_prepare=removeDates)
class CreateMirror23Test(BaseTest):
"""
create mirror: mirror with wrong filter
"""
runCmd = "aptly mirror create -ignore-signatures -filter='nginx | ' mirror23 http://security.debian.org/ wheezy/updates main"
expectedCode = 1
class CreateMirror24Test(BaseTest):
"""
create mirror: disable config value with option
"""
runCmd = "aptly mirror create -ignore-signatures=false -keyring=aptlytest.gpg mirror24 http://security.debian.org/ wheezy/updates main"
fixtureGpg = True
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
configOverride = {
"gpgDisableVerify": True
}
class CreateMirror25Test(BaseTest):
"""
create mirror: mirror with udebs enabled
"""
runCmd = "aptly -architectures=i386 mirror create -ignore-signatures -with-udebs mirror25 http://mirror.yandex.ru/debian/ wheezy"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror25", "mirror_show")
class CreateMirror26Test(BaseTest):
"""
create mirror: flat mirror with udebs
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg -with-udebs mirror26 http://pkg.jenkins-ci.org/debian-stable binary/"
fixtureGpg = True
expectedCode = 1
class CreateMirror27Test(BaseTest):
"""
create mirror: component with slashes, no stripping
"""
runCmd = "aptly mirror create --ignore-signatures mirror27 http://linux.dell.com/repo/community/ubuntu wheezy openmanage/740"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror27", "mirror_show")
class CreateMirror28Test(BaseTest):
"""
create mirror: -force-components
"""
runCmd = "aptly mirror create -ignore-signatures -force-components mirror28 http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen"
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror28", "mirror_show", match_prepare=removeDates)
|
|
"""
:class:`.OpenCage` is the Opencagedata geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.exc import (
GeocoderQueryError,
GeocoderQuotaExceeded,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("OpenCage", )
class OpenCage(Geocoder):
"""
Geocoder using the Open Cage Data API. Documentation at:
http://geocoder.opencagedata.com/api.html
    .. versionadded:: 1.1.0
"""
def __init__(
self,
api_key,
domain='api.opencagedata.com',
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None,
): # pylint: disable=R0913
"""
Initialize a customized Open Cage Data geocoder.
:param string api_key: The API key required by Open Cage Data
to perform geocoding requests. You can get your key here:
https://developer.opencagedata.com/
:param string domain: Currently it is 'api.opencagedata.com', can
be changed for testing purposes.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
"""
super(OpenCage, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent
)
self.api_key = api_key
self.domain = domain.strip('/')
self.scheme = scheme
self.api = '%s://%s/geocode/v1/json' % (self.scheme, self.domain)
def geocode(
self,
query,
bounds=None,
country=None,
language=None,
exactly_one=True,
timeout=None,
): # pylint: disable=W0221,R0913
"""
Geocode a location query.
:param string query: The query string to be geocoded; this must
be URL encoded.
:param string language: an IETF format language code (such as `es`
for Spanish or pt-BR for Brazilian Portuguese); if this is
omitted a code of `en` (English) will be assumed by the remote
service.
:param string bounds: Provides the geocoder with a hint to the region
that the query resides in. This value will help the geocoder
but will not restrict the possible results to the supplied
region. The bounds parameter should be specified as 4
coordinate points forming the south-west and north-east
corners of a bounding box. For example,
`bounds=-0.563160,51.280430,0.278970,51.683979`.
:param string country: Provides the geocoder with a hint to the
country that the query resides in. This value will help the
geocoder but will not restrict the possible results to the
supplied country. The country code is a 3 character code as
defined by the ISO 3166-1 Alpha 3 standard.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'q': self.format_string % query,
}
        if bounds:
            params['bounds'] = bounds
        if language:
            params['language'] = language
        if country:
            params['country'] = country
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def reverse(
self,
query,
language=None,
exactly_one=False,
timeout=None,
): # pylint: disable=W0221,R0913
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param string language: The language in which to return results.
:param boolean exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'q': self._coerce_point_to_string(query),
}
if language:
params['language'] = language
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def _parse_json(self, page, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
places = page.get('results', [])
if not len(places):
self._check_status(page.get('status'))
return None
def parse_place(place):
'''Get the location, lat, lng from a single json place.'''
location = place.get('formatted')
latitude = place['geometry']['lat']
longitude = place['geometry']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(places[0])
else:
return [parse_place(place) for place in places]
@staticmethod
def _check_status(status):
"""
Validates error statuses.
"""
status_code = status['code']
if status_code == 429:
# Rate limit exceeded
raise GeocoderQuotaExceeded(
'The given key has gone over the requests limit in the 24'
' hour period or has submitted too many requests in too'
' short a period of time.'
)
if status_code == 200:
# When there are no results, just return.
return
if status_code == 403:
raise GeocoderQueryError(
'Your request was denied.'
)
else:
raise GeocoderQueryError('Unknown error.')
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from contractor.task import base
from neutronclient.v2_0 import client as ne_client
from neutronclient.common import exceptions as ne_exceptions
LOG = logging.getLogger(__name__)
class NeutronTask(base.Task):
def __init__(self, runner, environment, store):
super(NeutronTask, self).__init__(runner, environment, store)
env_config = self._get_environment_config()
self.ne_client = ne_client.Client(
auth_url=env_config['credentials']['auth_url'],
username=env_config['credentials'].get('username', None),
user_id=env_config['credentials'].get('user_id', None),
password=env_config['credentials']['password'],
tenant_name=env_config['credentials'].get('project_name', None),
tenant_id=env_config['credentials'].get('project_id', None),
region_name=env_config['credentials'].get('region_name', None),
)
def _get_network_id_from_name(self, name):
for network in self.store['_os-neutron_networks']:
if network['name'] == name:
return network['id']
        raise Exception('Failed to find network with name: %s' % name)
def _get_subnet_id_from_name(self, name):
for subnet in self.store['_os-neutron_subnets']:
if subnet['name'] == name:
return subnet['id']
        raise Exception('Failed to find subnet with name: %s' % name)
def _get_router_id_from_name(self, name):
for router in self.store['_os-neutron_routers']:
if router['name'] == name:
return router['id']
        raise Exception('Failed to find router with name: %s' % name)
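# Sketch of the environment config shape these tasks appear to consume (inferred from the
# lookups in this module; the real schema may differ):
#
#   routers:
#     edge-router:
#       external_gateway_info: {network_id: ...}
#       subnets: [frontend]
#   networks:
#     internal:
#       subnets:
#         frontend: {cidr: "10.0.0.0/24", ip_version: 4}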
class RouterTask(NeutronTask):
provides = 'router'
depends = []
routers_to_create = None
routers_to_update = None
routers_to_destroy = None
def introspect(self):
routers = self.ne_client.list_routers()['routers']
self.store['_os-neutron_routers'] = routers
existing_routers = set([r['name'] for r in routers])
expected_routers = set(self._get_environment_config()['routers'].keys())
self.routers_to_create = expected_routers.difference(existing_routers)
self.routers_to_update = expected_routers.intersection(existing_routers)
self.routers_to_destroy = existing_routers.difference(expected_routers)
LOG.info('Router TODO - C(%d) U(%d) D(%d)',
len(self.routers_to_create),
len(self.routers_to_update),
len(self.routers_to_destroy))
def build(self):
router_config = self._get_environment_config()['routers']
for name in self.routers_to_create:
LOG.info('Creating router %s', name)
c = router_config[name]
body = {
'router': {
'name': name,
'external_gateway_info': c.get('external_gateway_info', {})
}
}
resp = self.ne_client.create_router(body=body)
LOG.info('Router %s created with id %s', name, resp['router']['id'])
self.store['_os-neutron_routers'].append(resp['router'])
def destroy(self):
for name in self.routers_to_destroy:
LOG.info('Destroying router %s', name)
for router in self.store['_os-neutron_routers']:
if router['name'] == name:
self.ne_client.delete_router(router['id'])
LOG.info('Router %s with id %s destroyed', name,
router['id'])
class NetworkTask(NeutronTask):
provides = 'network'
depends = ['router']
networks_to_create = None
networks_to_update = None
networks_to_destroy = None
def introspect(self):
networks = self.ne_client.list_networks()['networks']
self.store['_os-neutron_networks'] = networks
existing_networks = set([n['name'] for n in networks])
expected_networks = set(self._get_environment_config()['networks'].keys())
self.networks_to_create = expected_networks.difference(existing_networks)
self.networks_to_update = expected_networks.intersection(existing_networks)
self.networks_to_destroy = existing_networks.difference(expected_networks)
LOG.info('Network TODO - C(%d) U(%d) D(%d)',
len(self.networks_to_create),
len(self.networks_to_update),
len(self.networks_to_destroy))
def build(self):
network_config = self._get_environment_config()['networks']
for name in self.networks_to_create:
LOG.info('Creating network %s', name)
c = network_config[name]
body = {
'network': {
'name': name
}
}
resp = self.ne_client.create_network(body=body)
LOG.info('Network %s created with id %s', name, resp['network']['id'])
self.store['_os-neutron_networks'].append(resp['network'])
def destroy(self):
        for name in self.networks_to_destroy:
            if name == 'Ext-Net':
                LOG.info('Skipping destroy of network %s', name)
                continue
            LOG.info('Destroying network %s', name)
for network in self.store['_os-neutron_networks']:
if network['name'] == name:
self.ne_client.delete_network(network['id'])
LOG.info('Network %s with id %s destroyed', name,
network['id'])
class SubnetTask(NeutronTask):
provides = 'subnet'
depends = ['network']
subnets_to_create = None
subnets_to_update = None
subnets_to_destroy = None
def _get_subnets_from_config(self):
subnets = {}
networks = self._get_environment_config()['networks']
for network_name, network in networks.items():
nsubnets = network.get('subnets', {})
for nsubnet in nsubnets.values():
nsubnet['network'] = network_name
subnets.update(nsubnets)
return subnets
def introspect(self):
subnets = self.ne_client.list_subnets()['subnets']
self.store['_os-neutron_subnets'] = subnets
existing_subnets = set([s['name'] for s in subnets])
expected_subnets = set(self._get_subnets_from_config().keys())
self.subnets_to_create = expected_subnets.difference(existing_subnets)
self.subnets_to_update = expected_subnets.intersection(existing_subnets)
self.subnets_to_destroy = existing_subnets.difference(expected_subnets)
LOG.info('Subnet TODO - C(%d) U(%d) D(%d)',
len(self.subnets_to_create),
len(self.subnets_to_update),
len(self.subnets_to_destroy))
def build(self):
subnet_config = self._get_subnets_from_config()
for name in self.subnets_to_create:
LOG.info('Creating subnets %s', name)
c = subnet_config[name]
network_id = self._get_network_id_from_name(c['network'])
body = {
'subnet': {
'name': name,
'network_id': network_id,
'ip_version': c.get('ip_version', 4),
'cidr': c['cidr'],
}
}
resp = self.ne_client.create_subnet(body=body)
LOG.info('Subnet %s created with id %s', name, resp['subnet']['id'])
self.store['_os-neutron_subnets'].append(resp['subnet'])
def destroy(self):
for name in self.subnets_to_destroy:
LOG.info('Destroying subnet %s', name)
for subnet in self.store['_os-neutron_subnets']:
if subnet['name'] == name:
self.ne_client.delete_subnet(subnet['id'])
LOG.info('Subnet %s with id %s destroyed', name,
subnet['id'])
class RouterInterfaceTask(NeutronTask):
provides = 'router_interface'
depends = ['subnet', 'router']
def _parse_config(self):
router_interfaces = []
routers = self._get_environment_config()['routers']
for router_name, router in routers.items():
LOG.info("Found Router Name %s", router_name)
rsubnets = router.get('subnets', [])
for rsubnet in rsubnets:
LOG.info("Found Router Subnet %s", rsubnet)
router_interfaces.append((router_name, rsubnet, ))
return router_interfaces
def introspect(self):
ri_config = self._parse_config()
# TODO...
self.ri_to_create = ri_config
self.ri_to_update = []
self.ri_to_destroy = []
LOG.info('Router Interface TODO - C(%d) U(%d) D(%d)',
len(self.ri_to_create),
len(self.ri_to_update),
len(self.ri_to_destroy))
def build(self):
for ri in self.ri_to_create:
router_id = self._get_router_id_from_name(ri[0])
subnet_id = self._get_subnet_id_from_name(ri[1])
body = {"subnet_id": subnet_id}
try:
self.ne_client.add_interface_router(router_id, body)
except ne_exceptions.NeutronClientException as e:
if 'Router already has a port on subnet' in str(e):
pass
else:
raise
class SecurityGroupTask(NeutronTask):
provides = 'security_group'
depends = []
def __init__(self, runner, environment, store):
super(SecurityGroupTask, self).__init__(runner, environment, store)
self.security_groups = {
'default': {
'description': 'Default Security Group'
},
'beachhead': {
'description': 'Beachhead Security Group'
}
}
defined_security_groups = set()
defined_security_groups.update(self.runner.config['security_groups'].keys())
defined_security_groups.update(self.runner.config['roles'].keys())
for group_name in defined_security_groups:
self.security_groups[group_name] = {
'description': '%s instances' % group_name,
}
def introspect(self):
security_groups = self.ne_client.list_security_groups()['security_groups']
self.store['_os-neutron_security_groups'] = {s['name']: s for s in security_groups}
existing = set([s['name'] for s in security_groups])
expected = set(self.security_groups.keys())
self.to_create = expected.difference(existing)
self.to_update = expected.intersection(existing)
self.to_destroy = existing.difference(expected)
LOG.info('Security Group TODO - C(%d) U(%d) D(%d)',
len(self.to_create),
len(self.to_update),
len(self.to_destroy))
def build(self):
LOG.info('Building %s security groups', len(self.to_create))
for name in self.to_create:
LOG.info('Building security group with name %s', name)
body = {
"security_group": {
"name": name,
"description": self.security_groups[name]['description'],
},
}
self.ne_client.create_security_group(body)
def destroy(self):
LOG.info('Destroying %s security groups', len(self.to_destroy))
for name in self.to_destroy:
LOG.info('Destroying security group with name %s', name)
security_group_id = self.store['_os-neutron_security_groups'][name]['id']
self.ne_client.delete_security_group(security_group_id)
class SecurityGroupRuleTask(NeutronTask):
provides = 'security_group_rules'
    depends = ['security_group']
def __init__(self, runner, environment, store):
        super(SecurityGroupRuleTask, self).__init__(runner, environment, store)
def introspect(self):
pass
def build(self):
pass
def destroy(self):
pass
|
|
"""Support for Google Actions Smart Home Control."""
import asyncio
from datetime import timedelta
import logging
from uuid import uuid4
from aiohttp import ClientError, ClientResponseError
from aiohttp.web import Request, Response
import jwt
# Typing imports
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES, HTTP_INTERNAL_SERVER_ERROR
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util import dt as dt_util
from .const import (
CONF_API_KEY,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_REPORT_STATE,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
GOOGLE_ASSISTANT_API_ENDPOINT,
HOMEGRAPH_SCOPE,
HOMEGRAPH_TOKEN_URL,
REPORT_STATE_BASE_URL,
REQUEST_SYNC_BASE_URL,
SOURCE_CLOUD,
)
from .helpers import AbstractConfig
from .smart_home import async_handle_message
_LOGGER = logging.getLogger(__name__)
def _get_homegraph_jwt(time, iss, key):
now = int(time.timestamp())
jwt_raw = {
"iss": iss,
"scope": HOMEGRAPH_SCOPE,
"aud": HOMEGRAPH_TOKEN_URL,
"iat": now,
"exp": now + 3600,
}
return jwt.encode(jwt_raw, key, algorithm="RS256").decode("utf-8")
async def _get_homegraph_token(hass, jwt_signed):
headers = {
"Authorization": f"Bearer {jwt_signed}",
"Content-Type": "application/x-www-form-urlencoded",
}
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": jwt_signed,
}
session = async_get_clientsession(hass)
async with session.post(HOMEGRAPH_TOKEN_URL, headers=headers, data=data) as res:
res.raise_for_status()
return await res.json()
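# The two helpers above implement the service-account flow: a signed RS256 JWT assertion is
# exchanged at HOMEGRAPH_TOKEN_URL for a short-lived bearer token used by GoogleConfig below.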
class GoogleConfig(AbstractConfig):
"""Config for manual setup of Google."""
def __init__(self, hass, config):
"""Initialize the config."""
super().__init__(hass)
self._config = config
self._access_token = None
self._access_token_renew = None
@property
def enabled(self):
"""Return if Google is enabled."""
return True
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._config.get(CONF_SECURE_DEVICES_PIN)
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self._config.get(CONF_REPORT_STATE)
def should_expose(self, state) -> bool:
"""Return if entity should be exposed."""
expose_by_default = self._config.get(CONF_EXPOSE_BY_DEFAULT)
exposed_domains = self._config.get(CONF_EXPOSED_DOMAINS)
if state.attributes.get("view") is not None:
# Ignore entities that are views
return False
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
explicit_expose = self.entity_config.get(state.entity_id, {}).get(CONF_EXPOSE)
domain_exposed_by_default = (
expose_by_default and state.domain in exposed_domains
)
# Expose an entity if the entity's domain is exposed by default and
# the configuration doesn't explicitly exclude it from being
# exposed, or if the entity is explicitly exposed
is_default_exposed = domain_exposed_by_default and explicit_expose is not False
return is_default_exposed or explicit_expose
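    # Illustrative example: with expose_by_default=True and "light" in exposed_domains,
    # light.kitchen is exposed unless its entity config sets expose: false, while
    # switch.boiler is exposed only with an explicit expose: true.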
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return context.user_id
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
return True
async def _async_request_sync_devices(self, agent_user_id: str):
if CONF_API_KEY in self._config:
await self.async_call_homegraph_api_key(
REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
)
elif CONF_SERVICE_ACCOUNT in self._config:
await self.async_call_homegraph_api(
REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
)
else:
_LOGGER.error("No configuration for request_sync available")
async def _async_update_token(self, force=False):
if CONF_SERVICE_ACCOUNT not in self._config:
_LOGGER.error("Trying to get homegraph api token without service account")
return
now = dt_util.utcnow()
if not self._access_token or now > self._access_token_renew or force:
token = await _get_homegraph_token(
self.hass,
_get_homegraph_jwt(
now,
self._config[CONF_SERVICE_ACCOUNT][CONF_CLIENT_EMAIL],
self._config[CONF_SERVICE_ACCOUNT][CONF_PRIVATE_KEY],
),
)
self._access_token = token["access_token"]
self._access_token_renew = now + timedelta(seconds=token["expires_in"])
async def async_call_homegraph_api_key(self, url, data):
"""Call a homegraph api with api key authentication."""
websession = async_get_clientsession(self.hass)
try:
res = await websession.post(
url, params={"key": self._config.get(CONF_API_KEY)}, json=data
)
_LOGGER.debug(
"Response on %s with data %s was %s", url, data, await res.text()
)
res.raise_for_status()
return res.status
except ClientResponseError as error:
_LOGGER.error("Request for %s failed: %d", url, error.status)
return error.status
except (asyncio.TimeoutError, ClientError):
_LOGGER.error("Could not contact %s", url)
return HTTP_INTERNAL_SERVER_ERROR
async def async_call_homegraph_api(self, url, data):
"""Call a homegraph api with authentication."""
session = async_get_clientsession(self.hass)
async def _call():
headers = {
"Authorization": f"Bearer {self._access_token}",
"X-GFE-SSL": "yes",
}
async with session.post(url, headers=headers, json=data) as res:
_LOGGER.debug(
"Response on %s with data %s was %s", url, data, await res.text()
)
res.raise_for_status()
return res.status
try:
await self._async_update_token()
try:
return await _call()
except ClientResponseError as error:
if error.status == 401:
_LOGGER.warning(
"Request for %s unauthorized, renewing token and retrying", url
)
await self._async_update_token(True)
return await _call()
raise
except ClientResponseError as error:
_LOGGER.error("Request for %s failed: %d", url, error.status)
return error.status
except (asyncio.TimeoutError, ClientError):
_LOGGER.error("Could not contact %s", url)
return HTTP_INTERNAL_SERVER_ERROR
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
data = {
"requestId": uuid4().hex,
"agentUserId": agent_user_id,
"payload": message,
}
await self.async_call_homegraph_api(REPORT_STATE_BASE_URL, data)
class GoogleAssistantView(HomeAssistantView):
"""Handle Google Assistant requests."""
url = GOOGLE_ASSISTANT_API_ENDPOINT
name = "api:google_assistant"
requires_auth = True
def __init__(self, config):
"""Initialize the Google Assistant request handler."""
self.config = config
async def post(self, request: Request) -> Response:
"""Handle Google Assistant requests."""
message: dict = await request.json()
result = await async_handle_message(
request.app["hass"],
self.config,
request["hass_user"].id,
message,
SOURCE_CLOUD,
)
return self.json(result)
|
|
#!/usr/bin/env python3
from pathlib import Path
from configparser import RawConfigParser
from collections import OrderedDict
import itertools
import json
from collections import defaultdict
import os
print('parsing babel language mapping')
import sqlite3
DB = sqlite3.connect(':memory:')
class Row(sqlite3.Row):
def __getattr__(self, name):
return self[name]
def __repr__(self):
return str(tuple([self[k] for k in self.keys()]))
def as_dict(self):
return { k: self[k] for k in self.keys() }
DB.row_factory = Row
class Trie:
def __init__(self):
self.letters={}
def addString(self,s):
letters=self.letters
for c in s:
if(c not in letters):
letters[c]={"freq":1}
else:
letters[c]["freq"]+=1
letters=letters[c]
letters["*"]=True #marks the end of word
def generateUniquePrefix(self,s):
prefix=[]
letters=self.letters
for c in s:
prefix.append(c)
if(letters[c]["freq"]==1):
break
letters=letters[c]
return "".join(prefix)
@classmethod
def prefix(cls, A):
t=Trie()
for s in A:
t.addString(s)
ans=[]
for s in A:
prefix=t.generateUniquePrefix(s)
ans.append(prefix)
return {k: next(v for v in A if v.startswith(k)) for k in ans}
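# Illustrative example: Trie.prefix(["english", "esperanto", "estonian"]) returns
# {"en": "english", "esp": "esperanto", "est": "estonian"}, i.e. each shortest unique
# prefix mapped back to a full name.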
DB.execute('CREATE TABLE biblatex (langid NOT NULL PRIMARY KEY)')
DB.executemany('INSERT INTO biblatex (langid) VALUES (?)', [(path.stem.lower(),) for path in Path('submodules/biblatex/tex/latex/biblatex/lbx').glob('*.lbx')])
class MultiOrderedDict(OrderedDict):
def __setitem__(self, key, value):
if isinstance(value, list) and key in self:
self[key].extend(value)
else:
super().__setitem__(key, value)
DB.execute('CREATE TABLE babel (tag NOT NULL, prio NOT NULL, rel NOT NULL, langid NOT NULL)')
for path in sorted(Path('submodules/babel/locale').rglob('*.ini'), key=lambda p: p.name):
locale = RawConfigParser(dict_type=MultiOrderedDict, strict=False)
locale.read(str(path))
locale = locale['identification']
if 'name.babel' not in locale:
print(' ', path.name, 'has no name')
continue
tag = locale['tag.bcp47'].lower()
for prio, rel, name in zip([1] + [2] * len(locale['name.babel']), ['name'] + ['alias'] * len(locale['name.babel']), locale['name.babel'].split(' ')):
DB.execute('INSERT INTO babel (tag, prio, rel, langid) VALUES (?, ?, ?, ?)', (tag, prio, rel, name.lower()))
for key, name in locale.items():
if key.startswith('name.babel.'):
DB.execute('''
INSERT INTO babel (tag, prio, rel, langid)
SELECT :tag, :prio, :rel, :name
WHERE NOT EXISTS(SELECT 1 FROM babel WHERE tag = :tag AND langid = :name)
''', { 'tag': tag, 'prio': 3, 'rel': 'alias', 'name': name.lower() })
for rel in ['polyglossia', 'local', 'english']:
key = 'name.' + rel
if key in locale:
DB.execute('''
INSERT INTO babel (tag, prio, rel, langid)
SELECT :tag, :prio, :rel, :langid
WHERE NOT EXISTS(SELECT 1 FROM babel WHERE tag = :tag AND langid = :langid)
''', { 'tag': tag, 'prio': 4, 'rel': rel, 'langid': locale[key].lower() })
# cleanup
DB.execute('''
DELETE FROM babel
WHERE
(tag LIKE 'de%-1901' AND langid LIKE 'n%')
OR
(tag LIKE 'de%-1996' AND langid NOT LIKE 'n%')
OR
(tag = 'es-mx' AND langid = 'spanish')
''')
# select name from biblatex-preferred, mark with 0 as selected
DB.execute('''
WITH preferred AS (
SELECT ROWID, tag, prio, langid, ROW_NUMBER () OVER (PARTITION BY tag ORDER BY prio, langid) as ranking
FROM babel
WHERE langid IN (SELECT langid FROM biblatex)
)
UPDATE babel
SET prio = 0
WHERE ROWID IN (SELECT ROWID FROM preferred WHERE ranking = 1)
''')
# mark babel name as selected for all others
DB.execute('''
UPDATE babel
SET prio = 0
WHERE prio = 1 AND NOT EXISTS(SELECT 1 FROM babel sel WHERE sel.prio = 0 AND sel.tag = babel.tag)
''')
DB.execute('CREATE TABLE langmap (language NOT NULL PRIMARY KEY, langid NOT NULL)')
DB.execute('INSERT INTO langmap (language, langid) SELECT tag, langid FROM babel WHERE prio = 0')
# set self-alias
DB.execute('''
INSERT INTO langmap (language, langid)
SELECT DISTINCT langid, langid
FROM langmap
WHERE langid NOT IN (SELECT language FROM langmap)
''')
# 3-char abbreviation
DB.execute('''
WITH abbr AS (
SELECT DISTINCT REPLACE(SUBSTR(language, 1, 3), '-', '') AS language, langid
FROM langmap
),
abbr_groups AS (
SELECT language, COUNT(*) AS n FROM abbr GROUP BY language
)
INSERT INTO langmap (language, langid)
SELECT a.language, a.langid
FROM abbr a
JOIN abbr_groups g ON a.language = g.language
WHERE LENGTH(a.language) = 3 AND g.n = 1 AND a.language NOT IN (SELECT language FROM langmap)
''')
# langids that map to a single tag (strict alias)
DB.execute('''
WITH groupcount AS (
SELECT langid, COUNT(*) AS groupcount
FROM babel
WHERE prio <> 0
GROUP BY langid
)
INSERT INTO langmap (language, langid)
SELECT alias.langid as tag, lang.langid as langid
FROM groupcount
JOIN babel alias ON alias.langid = groupcount.langid AND alias.prio <> 0
JOIN babel lang ON lang.tag = alias.tag AND lang.prio = 0
WHERE
groupcount.groupcount = 1
AND
alias.langid NOT IN (SELECT language FROM langmap)
''')
# add tag-tag
DB.execute('''
WITH tagtag AS (
SELECT language || '-' || language as tag, langid
FROM langmap
WHERE language NOT LIKE '%-%' AND LENGTH(language) = 2
)
INSERT INTO langmap (language, langid)
SELECT tag, langid
FROM tagtag
WHERE tag NOT IN (SELECT language FROM langmap)
''')
# language alias all with same prefix
langids = defaultdict(list)
for row in DB.execute('SELECT * FROM babel WHERE prio <> 0 AND langid NOT IN (SELECT language FROM langmap) ORDER BY langid'):
langids[row.langid].append(row)
for langid, mapping in langids.items():
prefix = os.path.commonprefix([lang.tag for lang in mapping])
if len(prefix) > 0:
if prefix[-1] == '-':
prefix = prefix[:-1]
DB.execute('INSERT INTO langmap (language, langid) SELECT ?, langid FROM langmap WHERE language = ?', (langid, prefix))
# manual patchups
patchups = {
'gaelic': 'scottishgaelic',
'norwegian': 'norsk',
}
for language, langid in patchups.items():
DB.execute('INSERT INTO langmap (language, langid) SELECT ?, ? WHERE EXISTS (SELECT 1 FROM langmap WHERE langid = ?)', (language, langid, langid))
# all unique prefixes
for prefix, language in Trie.prefix([row.language for row in DB.execute('SELECT language FROM langmap')]).items():
continue # disable for now
if prefix[-1] == '-': prefix = prefix[:-1]
if len(prefix) < 3: continue # don't match very short IDs
DB.execute('''
INSERT INTO langmap (language, langid)
SELECT ?, langid
FROM langmap
WHERE language = ? AND NOT EXISTS (SELECT 1 FROM langmap WHERE language = ?)
''', (prefix, language, prefix))
for row in DB.execute('SELECT * FROM babel WHERE prio <> 0 AND langid NOT IN (SELECT language FROM langmap) ORDER BY langid'):
print(' ', row.langid, '=>', row.tag, 'not mapped')
os.makedirs('gen/babel', exist_ok=True)
with open('gen/babel/langmap.json', 'w') as f:
json.dump({ row.language: row.langid for row in DB.execute('SELECT * from langmap ORDER BY language')}, f, indent=' ')
with open('gen/babel/ids.json', 'w') as f:
json.dump([ row.langid for row in DB.execute('SELECT DISTINCT langid from langmap ORDER BY langid')], f, indent=' ')
with open('gen/babel/tag.json', 'w') as f:
tag = {}
for langid in ['en', 'ja', 'zh', 'de']:
language = f"SELECT DISTINCT langid FROM langmap WHERE language = '{langid}' OR language LIKE '{langid}-%'"
language = DB.execute(f'''
{language}
UNION
SELECT DISTINCT language FROM langmap WHERE langid IN ({language})
''')
for row in language:
tag[row.langid] = langid
json.dump(tag, f, indent=' ')
#for line in DB.iterdump():
# print(line)
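# A minimal sketch (not part of the original pipeline) of how the langmap.json
# emitted above might be consumed downstream. It assumes only the structure
# written here -- a flat { language-or-tag: langid } object -- and the helper
# name is hypothetical.
def lookup_langid(language, langmap_path='gen/babel/langmap.json'):
    import json  # local import keeps the sketch self-contained
    with open(langmap_path) as f:
        langmap = json.load(f)
    return langmap.get(language)
# e.g. lookup_langid('de-de') should return the same langid as lookup_langid('de'),
# given the doubled-tag aliases inserted above.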
|
|
#!/usr/bin/env python
"""
SlipStream Client
=====
Copyright (C) 2014 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import os.path
import re
import sys
import xml.etree.ElementTree as ET
from slipstream.command.CommandBase import CommandBase
from slipstream.HttpClient import HttpClient
import slipstream.util as util
class MainProgram(CommandBase):
'''Recursively download SlipStream modules as XML from server.'''
def __init__(self, argv=None):
self.module = ''
self.endpoint = None
super(MainProgram, self).__init__(argv)
def parse(self):
        usage = '''usage: %prog [options] <module-uri>
<module-uri> Name of the root module.'''
self.parser.usage = usage
self.addEndpointOption()
self.parser.add_option('--remove-cloud-specific', dest='remove_clouds',
help='Remove all cloud specific elements (image ids, cloud parameters, ...)',
default=False, action='store_true')
self.parser.add_option('--dump-image-ids', dest='dump_image_ids',
help='Store image IDs found in image modules into per module files.',
default=False, action='store_true')
self.parser.add_option('--dump-image-ids-dir', dest='dump_image_ids_dir',
help='Path to the directory to store the image IDs files. Default: current directory.',
default='.')
self.parser.add_option('--remove-group-members', dest='remove_group_members',
help='Remove members of the group in the authorizations',
default=False, action='store_true')
self.parser.add_option('--reset-commit-message', dest='reset_commit_message',
help='Replace the commit message by "Initial version of this module"',
default=False, action='store_true')
self.parser.add_option('--flat', dest='flat_export',
help='Download without creating subdirectories',
default=False, action='store_true')
self.parser.add_option('--continue-on-error', dest='continue_on_error',
help='Continue processing when an error occurs',
default=False, action='store_true')
self.options, self.args = self.parser.parse_args()
self._checkArgs()
def _checkArgs(self):
if len(self.args) == 1:
self.module = self.args[0]
if len(self.args) > 1:
self.usageExitTooManyArguments()
@staticmethod
def _remove_element(parent, element):
if element is not None:
parent.remove(element)
def _remove_transient_elements(self, root):
self._remove_element(root, root.find('inputParametersExpanded'))
self._remove_element(root, root.find('packagesExpanded'))
self._remove_element(root, root.find('targetsExpanded'))
self._remove_element(root, root.find('buildStates'))
self._remove_element(root, root.find('runs'))
def _remove_clouds(self, root):
"""Remove the cloudImageIdentifiers, cloudNames and cloud specific parameters element from the given document.
These elements are not portable between SlipStream deployments."""
ids = root.find('cloudImageIdentifiers')
if ids is not None:
for id in ids.findall('*'):
ids.remove(id)
cloud_names = root.find('cloudNames')
if cloud_names is not None:
cloud_names.attrib['length'] = '0'
parameters = root.find('parameters')
        if parameters is not None and cloud_names is not None:
for cloud_name in cloud_names.findall('*'):
# The following XPath query doesn't work with Python < 2.7
cloud_parameters = parameters.findall("./entry/parameter[@category='%s'].." % cloud_name.text)
if cloud_parameters is not None:
for cloud_parameter in cloud_parameters:
parameters.remove(cloud_parameter)
cloud_names.remove(cloud_name)
def _remove_group_members(self, root):
authz = root.find('authz')
if authz is None:
return
group_members = authz.find('groupMembers')
if group_members is None:
return
for group_member in group_members.findall('*'):
group_members.remove(group_member)
def _reset_commit_message(self, root):
authz = root.find('authz')
commit = root.find('commit')
if commit is None:
return
commit.attrib['author'] = (authz is not None) and authz.attrib.get('owner', 'super') or 'super'
comment = commit.find('comment')
if comment is None:
return
comment.text = 'Initial version of this module'
def _retrieveModuleAsXml(self, client, module):
uri = util.MODULE_RESOURCE_PATH
uri += '/' + module
url = self.options.endpoint + uri
# IMPORTANT: pass BINARY content to ET. It will deduce the correct encoding.
resp, _ = client.get(url)
xml = resp.content
return ET.fromstring(xml)
def _writeModuleAsXml(self, root_element, module):
if self.options.flat_export:
module = module.replace('/', '_')
else:
if root_element.attrib.get('category', '').lower().strip() == 'project':
module = os.path.join(module, os.path.basename(module))
try:
os.makedirs(os.path.dirname(module), 0775)
except OSError as e:
pass
ET.ElementTree(root_element).write('%s.xml' % module)
def _getModuleChildren(self, module, root_element):
children = []
for child in root_element.findall('children/item'):
module_name = child.attrib['name']
module_path = '%s/%s' % (module, module_name)
children.append(module_path)
return children
@staticmethod
def _is_image(root):
return root.tag == 'imageModule'
def _dump_image_ids(self, root):
ids = root.find('cloudImageIdentifiers')
if ids is not None:
module_name = root.get('shortName')
module_path = re.sub('^module/', '', root.get('parentUri'))
module_uri = "%s/%s" % (module_path, module_name)
cloud_ids = ''
for _id in ids.findall('*'):
cloud_ids += "%s = %s:%s\n" % (module_uri, _id.get('cloudServiceName'),
_id.get('cloudImageIdentifier'))
if cloud_ids:
ids_dir = self.options.dump_image_ids_dir
if not os.path.exists(ids_dir):
os.makedirs(ids_dir)
fn = '%s_%s' % (module_path.replace('/','_'), module_name)
ids_file = "%s/%s.conf" % (ids_dir, fn)
with open(ids_file, 'w') as fh:
fh.write(cloud_ids)
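    # With --dump-image-ids, each image module therefore produces
    # <dump_image_ids_dir>/<parent_path_with_underscores>_<shortName>.conf,
    # containing one line per cloud of the form
    # "<module_uri> = <cloudServiceName>:<cloudImageIdentifier>".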
def doWork(self):
if self.options.remove_clouds and sys.version_info[0:3] < (2, 7, 0):
            print('Error: The use of "--remove-cloud-specific" requires Python >= 2.7', file=sys.stderr)
sys.exit(1)
client = HttpClient()
client.verboseLevel = self.verboseLevel
queue = [self.module]
while len(queue) > 0:
module = queue.pop(0)
print('Processing: %s' % module)
try:
root = self._retrieveModuleAsXml(client, module)
self._remove_transient_elements(root)
if self._is_image(root) and self.options.dump_image_ids:
self._dump_image_ids(root)
if self.options.remove_clouds:
self._remove_clouds(root)
if self.options.remove_group_members:
self._remove_group_members(root)
if self.options.reset_commit_message:
self._reset_commit_message(root)
self._writeModuleAsXml(root, module)
for child in self._getModuleChildren(module, root):
queue.append(child)
except Exception as e:
print(e)
if not self.options.continue_on_error:
sys.exit(1)
if __name__ == "__main__":
try:
MainProgram()
except KeyboardInterrupt:
print('\n\nExecution interrupted by the user... goodbye!')
sys.exit(-1)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test case for views."""
import time
import pytest
from flask import url_for
from flask_oauthlib.client import OAuth as FlaskOAuth
from helpers import check_response_redirect_url, \
check_response_redirect_url_args
from invenio_accounts.testutils import login_user_via_session
from invenio_db import db
from itsdangerous import TimedJSONWebSignatureSerializer
from mock import MagicMock
from simplejson import JSONDecodeError
from six.moves.urllib_parse import parse_qs, urlparse
from invenio_oauthclient import InvenioOAuthClientREST
from invenio_oauthclient._compat import _create_identifier
from invenio_oauthclient.handlers import token_getter
from invenio_oauthclient.models import RemoteToken
from invenio_oauthclient.views.client import rest_blueprint, serializer
def mock_response(oauth, remote_app='test', data=None):
"""Mock the oauth response to use the remote."""
# Mock oauth remote application
oauth.remote_apps[remote_app].handle_oauth2_response = MagicMock(
return_value=data or {
'access_token': 'test_access_token',
'scope': "",
'token_type': 'bearer'
}
)
def test_redirect_uri(app_rest):
"""Test redirect uri."""
with app_rest.test_client() as client:
# Test redirect
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test',
next='http://inveniosoftware.org'))
assert resp.status_code == 302
# Verify parameters
params = parse_qs(urlparse(resp.location).query)
assert params['response_type'] == ['code']
assert params['client_id'] == ['testid']
assert params['redirect_uri']
assert params['state']
        # Verify next parameter in state token does not allow open redirects
state = serializer.loads(params['state'][0])
assert state['next'] is None
# Assert redirect uri does not have any parameters.
params = parse_qs(urlparse(params['redirect_uri'][0]).query)
assert params == {}
# Assert that local redirects are allowed
test_urls = [
'/',
'/search'
]
for url in test_urls:
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next=url))
check_response_redirect_url(resp, url)
# Assert that absolute redirects are allowed only if
# `APP_ALLOWED_HOSTS` is set and includes them. Otherwise, the relative
# path of the url is extracted and returned. Note if you need to
# redirect to index page you should pass '/' as next parameter.
test_url = 'http://inveniosoftware.org/test'
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next=test_url))
check_response_redirect_url(resp, urlparse(test_url).path)
app_rest.config.update({"APP_ALLOWED_HOSTS": ["inveniosoftware.org"]})
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next=test_url))
check_response_redirect_url(resp, test_url)
def test_login(app_rest):
"""Test login."""
with app_rest.test_client() as client:
# Test redirect
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next='/'))
assert resp.status_code == 302
params = parse_qs(urlparse(resp.location).query)
assert params['response_type'] == ['code']
assert params['client_id'] == ['testid']
assert params['redirect_uri']
assert params['state']
# Invalid remote
resp = client.get(
url_for('invenio_oauthclient.rest_login', remote_app='invalid'),
follow_redirects=True
)
assert resp.status_code == 404
def test_authorized(base_app, params):
"""Test login."""
handled = {}
def test_authorized_handler(resp, remote, *args, **kwargs):
"""Save configuration."""
handled['resp'] = resp
handled['remote'] = remote
handled['args'] = args
handled['kwargs'] = kwargs
return 'TEST'
def test_invalid_authorized_handler(resp, remote, *args, **kwargs):
"""Set wrong configuration."""
handled['resp'] = 1
handled['remote'] = 1
handled['args'] = 1
handled['kwargs'] = 1
base_app.config['OAUTHCLIENT_REST_REMOTE_APPS'].update(
dict(
test=dict(
authorized_handler=test_authorized_handler,
params=params('testid'),
title='MyLinkedTestAccount',
),
test_invalid=dict(
authorized_handler=test_invalid_authorized_handler,
params=params('test_invalidid'),
title='Test Invalid',
),
full=dict(
params=params('fullid'),
title='Full',
),
))
FlaskOAuth(base_app)
InvenioOAuthClientREST(base_app)
base_app.register_blueprint(rest_blueprint)
with base_app.test_client() as client:
# Ensure remote apps have been loaded (due to before first
# request)
client.get(url_for(
'invenio_oauthclient.rest_login', remote_app='test'))
mock_response(base_app.extensions['oauthlib.client'], 'test')
mock_response(base_app.extensions['oauthlib.client'], 'test_invalid')
from invenio_oauthclient.views.client import serializer
state = serializer.dumps({
'app': 'test',
'sid': _create_identifier(),
'next': None,
})
resp = client.get(
url_for(
'invenio_oauthclient.rest_authorized',
remote_app='test',
code='test',
state=state
)
)
assert resp.data == b'TEST'
assert handled['remote'].name == 'test'
assert not handled['args']
assert not handled['kwargs']
assert handled['resp']['access_token'] == 'test_access_token'
state = serializer.dumps({
'app': 'test_invalid',
'sid': _create_identifier(),
'next': None,
})
# handler should return something
# Flask>1.0 is throwing TypeError and Flask<1.0 ValueError
with pytest.raises((ValueError, TypeError)):
client.get(url_for(
'invenio_oauthclient.rest_authorized',
remote_app='test_invalid',
code='test',
state=state,
))
def test_invalid_authorized_response(app_rest):
"""Test login."""
oauth = app_rest.extensions['oauthlib.client']
with app_rest.test_client() as client:
# Fake an authorized request
# Ensure remote apps have been loaded (due to before first
# request)
client.get(url_for(
'invenio_oauthclient.rest_login', remote_app='test'))
oauth.remote_apps['test'].handle_oauth2_response = MagicMock(
side_effect=JSONDecodeError('Expecting value', '', 0)
)
state = serializer.dumps({
'app': 'test',
'sid': _create_identifier(),
'next': None,
})
with pytest.raises(JSONDecodeError):
client.get(url_for(
'invenio_oauthclient.rest_authorized',
remote_app='test',
code='test',
state=state
))
def test_state_token(app_rest, monkeypatch):
"""Test state token."""
# Mock session id
monkeypatch.setattr('invenio_oauthclient._compat._create_identifier',
lambda: '1234')
monkeypatch.setattr(
'invenio_oauthclient.views.client._create_identifier', lambda: '1234')
with app_rest.test_client() as client:
# Ensure remote apps have been loaded (due to before first
# request)
client.get(url_for(
'invenio_oauthclient.rest_login', remote_app='test'))
mock_response(app_rest.extensions['oauthlib.client'], 'test')
# Good state token
state = serializer.dumps(
{'app': 'test', 'sid': '1234', 'next': None, }
)
resp = client.get(
url_for('invenio_oauthclient.rest_authorized', remote_app='test',
code='test', state=state)
)
assert resp.status_code == 200
outdated_serializer = TimedJSONWebSignatureSerializer(
app_rest.config['SECRET_KEY'],
expires_in=0,
)
# Bad state - timeout
state1 = outdated_serializer.dumps(
{'app': 'test', 'sid': '1234', 'next': None, }
)
# Bad state - app
state2 = serializer.dumps(
# State for another existing app (test_invalid exists)
{'app': 'test_invalid', 'sid': '1234', 'next': None, }
)
# Bad state - sid
state3 = serializer.dumps(
# State for another existing app (test_invalid exists)
{'app': 'test', 'sid': 'bad', 'next': None, }
)
time.sleep(1)
for s in [state1, state2, state3]:
resp = client.get(
url_for(
'invenio_oauthclient.rest_authorized', remote_app='test',
code='test', state=s)
)
assert resp.status_code == 302
assert parse_qs(urlparse(resp.location).query)['code'][0] == '403'
def test_no_remote_app(app_rest):
"""Test no remote app."""
with app_rest.test_client() as client:
resp = client.get(
url_for(
'invenio_oauthclient.rest_authorized', remote_app='invalid')
)
assert resp.status_code == 404
resp = client.get(
url_for(
'invenio_oauthclient.rest_disconnect', remote_app='invalid')
)
assert resp.status_code == 404
resp = client.get(
url_for('invenio_oauthclient.rest_signup', remote_app='invalid')
)
assert resp.status_code == 404
def test_token_getter_setter(app_rest, monkeypatch):
"""Test token getter setter."""
# Mock session id
monkeypatch.setattr('invenio_oauthclient._compat._create_identifier',
lambda: '1234')
monkeypatch.setattr(
'invenio_oauthclient.views.client._create_identifier', lambda: '1234')
oauth = app_rest.extensions['oauthlib.client']
# Mock user
user = MagicMock()
user.id = 1
user.get_id = MagicMock(return_value=1)
user.is_anonymous = False
with app_rest.test_client() as c:
login_user_via_session(c, user)
# First call login to be redirected
res = c.get(url_for('invenio_oauthclient.rest_login',
remote_app='full'))
assert res.status_code == 302
assert res.location.startswith(
oauth.remote_apps['full'].authorize_url
)
state = parse_qs(urlparse(res.location).query)['state'][0]
        # Mock response class
mock_response(app_rest.extensions['oauthlib.client'], 'full')
# Imitate that the user authorized our request in the remote
# application.
c.get(url_for(
'invenio_oauthclient.rest_authorized', remote_app='full',
code='test', state=state,
))
# Assert if everything is as it should be.
from flask import session as flask_session
assert flask_session['oauth_token_full'] == \
('test_access_token', '')
t = RemoteToken.get(1, 'fullid')
assert t.remote_account.client_id == 'fullid'
assert t.access_token == 'test_access_token'
assert RemoteToken.query.count() == 1
# Mock a new authorized request
mock_response(app_rest.extensions['oauthlib.client'], 'full', data={
'access_token': 'new_access_token',
'scope': "",
'token_type': 'bearer'
})
c.get(url_for(
'invenio_oauthclient.rest_authorized', remote_app='full',
code='test', state=state
))
t = RemoteToken.get(1, 'fullid')
assert t.access_token == 'new_access_token'
assert RemoteToken.query.count() == 1
val = token_getter(
app_rest.extensions['oauthlib.client'].remote_apps['full'])
assert val == ('new_access_token', '')
# Disconnect account
res = c.get(url_for(
'invenio_oauthclient.rest_disconnect', remote_app='full',
))
assert res.status_code == 302
expected_url_args = {
"message": "Successfully disconnected.",
"code": 200
}
check_response_redirect_url_args(res, expected_url_args)
# Assert that remote account have been removed.
t = RemoteToken.get(1, 'fullid')
assert t is None
# TODO: Figure out what is leaving session open & blocked
db.session.close()
def test_rejected(app_rest, monkeypatch):
"""Test rejected."""
# Mock session id
monkeypatch.setattr('invenio_oauthclient._compat._create_identifier',
lambda: '1234')
monkeypatch.setattr(
'invenio_oauthclient.views.client._create_identifier', lambda: '1234')
oauth = app_rest.extensions['oauthlib.client']
# Mock user id
user = MagicMock()
user.get_id = MagicMock(return_value=1)
user.is_authenticated = MagicMock(return_value=True)
with app_rest.test_client() as c:
login_user_via_session(c, user)
# First call login to be redirected
res = c.get(url_for('invenio_oauthclient.rest_login',
remote_app='full'))
assert res.status_code == 302
assert res.location.startswith(
oauth.remote_apps['full'].authorize_url
)
# Mock response to imitate an invalid response. Here, an
# example from GitHub when the code is expired.
mock_response(
app_rest.extensions['oauthlib.client'],
'full',
data=dict(
error_uri='http://developer.github.com/v3/oauth/'
'#bad-verification-code',
error_description='The code passed is '
'incorrect or expired.',
error='bad_verification_code',
)
)
# Imitate that the user authorized our request in the remote
        # application (however, the remote app will soon reply with an
# error)
state = serializer.dumps({
'app': 'full', 'sid': '1234', 'next': None,
})
res = c.get(url_for(
'invenio_oauthclient.rest_authorized', remote_app='full',
code='test', state=state
))
assert res.status_code == 302
|
|
import datetime
from itertools import chain, product
import json
import re
from urllib.parse import urlparse
from flask import current_app
from sqlalchemy import desc
from mock import ANY
import pytest
from dmtestutils.comparisons import AnyStringMatching, AnySupersetOf, RestrictedAny
from app import db
from app.models import (
ArchivedService,
AuditEvent,
BriefResponse,
DirectAwardProject,
DirectAwardSearch,
Outcome,
User,
)
from tests.bases import BaseApplicationTest
from ...helpers import FixtureMixin
class TestUpdateOutcome(BaseApplicationTest, FixtureMixin):
_test_update_outcome_base_scenarios = (
(
# other_oc_data
{},
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "314.15",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
}),
},
),
(
# other_oc_data
None,
# initial_data
{
"completed_at": None,
"result": "none-suitable",
},
# put_values
{
"completed": True,
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
}),
},
),
(
# other_oc_data
{
"completed_at": None,
"result": "cancelled",
},
# initial_data
{
"completed_at": None,
"result": "none-suitable",
},
# put_values
{
"completed": True,
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
}),
},
),
(
# other_oc_data
None,
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "314.15",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
}),
},
),
(
# other_oc_data
{
"completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# initial_data
{},
# put_values
{
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": False,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "314.15",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
}),
},
),
(
# other_oc_data
{
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "314.15",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
}),
},
),
(
# other_oc_data
{},
# initial_data
{
"completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Incubator",
"awardValue": "271271.2",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"award": {
"awardingOrganisationName": "Incubator",
"awardValue": "271271.20",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
}),
},
),
(
# other_oc_data
None,
# initial_data
{
"completed_at": None,
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# put_values
{
"completed": False,
"award": {
"startDate": None,
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": False,
"award": {
"awardingOrganisationName": "Lambay Freehold",
"awardValue": "54321.00",
"startDate": None,
"endDate": "2011-12-12",
},
}),
},
),
(
# other_oc_data
{},
# initial_data
{
"completed_at": None,
"awarding_organisation_name": "Lambay Freehold",
"award_value": 5432.1,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# put_values
{
"completed": True,
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
"award": {
"awardingOrganisationName": "Lambay Freehold",
"awardValue": "5432.10",
"startDate": "2010-12-12",
"endDate": "2011-12-12",
},
}),
},
),
(
# other_oc_data
{},
# initial_data
{
"completed_at": None,
"result": "none-suitable",
},
# put_values
{
"award": {
"awardingOrganisationName": "Talbot de Malahide",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": (
"awarding_organisation_name cannot be set for Outcomes with result='none-suitable'."
" Attempted to set value 'Talbot de Malahide'"
),
},
),
(
# other_oc_data
{
"completed_at": None,
"result": "cancelled",
},
# initial_data
{
"completed_at": datetime.datetime(2010, 3, 3, 3, 3, 3),
"result": "none-suitable",
},
# put_values
{
"completed": False,
},
# expected_status_code
400,
# expected_response_data
{
"error": "Can't un-complete outcome",
},
),
(
# other_oc_data
{
"completed_at": datetime.datetime(2010, 3, 3, 3, 3, 3),
"result": "cancelled",
},
# initial_data
{
"completed_at": None,
"result": "none-suitable",
},
# put_values
{
"completed": True,
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(
r".+ \d+ already has a complete outcome: \d+"
),
},
),
(
# other_oc_data
None,
# initial_data
{},
# put_values
{
"result": "cancelled",
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(r".*json was not a valid format.*", flags=re.I),
},
),
(
# other_oc_data
{},
# initial_data
{},
# put_values
{
"resultOfDirectAward": {
"projectId": 321,
},
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(r".*json was not a valid format.*", flags=re.I),
},
),
(
# other_oc_data
None,
# initial_data
{},
# put_values
{
"completed": True,
# note "award" section flattened here
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(r".*json was not a valid format.*", flags=re.I),
},
),
(
# other_oc_data
{
"completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(
r".+ \d+ already has a complete outcome: \d+"
),
},
),
(
# other_oc_data
{
"completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
"result": "cancelled",
},
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(
r".+ \d+ already has a complete outcome: \d+",
),
},
),
(
# other_oc_data
{},
# initial_data
{
"completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# put_values
{
"completed": False,
"award": {
"awardingOrganisationName": "Incubator",
"awardValue": "271271.2",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": "Can't un-complete outcome",
},
),
(
# other_oc_data
None,
# initial_data
{
"completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "",
"awardValue": "271271.2",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(r".*\bawarding_organisation_name\b.*"),
},
),
(
# other_oc_data
{},
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Billy Pitt",
"awardValue": "314.15",
"startDate": "2020-10-10",
"endDate": "2020-20-20",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(r".*\bendDate\b.*"),
},
),
(
# other_oc_data
None,
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Martello",
"awardValue": "Twelve quid",
"startDate": "2020-01-01",
"endDate": "2021-12-21",
},
},
# expected_status_code
400,
# expected_response_data
{
"error": AnyStringMatching(r".*\bawardValue\b.*"),
},
),
)
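    # Each scenario above is a 5-tuple of
    # (other_oc_data, initial_data, put_values, expected_status_code, expected_response_data);
    # the parametrize below prepends the two *_brief_based flags so that every
    # scenario runs once Brief-based and once Direct Award-based, plus a couple
    # of mixed-target cases.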
@pytest.mark.parametrize(
(
"other_oc_brief_based",
"initial_brief_based",
"other_oc_data",
"initial_data",
"put_values",
"expected_status_code",
"expected_response_data",
),
tuple(chain(
( # we reproduce here the variants in _test_update_outcome_base_scenarios, once for Briefs, once
# for Projects
(f_t, f_t,) + variant_params
for f_t, variant_params in product((False, True,), _test_update_outcome_base_scenarios)
),
( # and also include some with mixed target-types
(
# other_oc_brief_based
False,
# initial_brief_based
True,
# other_oc_data
{
"completed_at": datetime.datetime(2007, 7, 7, 7, 7, 7),
"result": "none-suitable",
},
# initial_data
{
"completed_at": None,
"result": "cancelled",
},
# put_values
{
"completed": True,
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
}),
},
),
(
# other_oc_brief_based
True,
# initial_brief_based
False,
# other_oc_data
{
"completed_at": datetime.datetime(2007, 7, 7, 7, 7, 7),
"awarding_organisation_name": "Lambay Freehold",
"award_value": 54321,
"start_date": datetime.date(2010, 12, 12),
"end_date": datetime.date(2011, 12, 12),
},
# initial_data
{},
# put_values
{
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "00314.1500",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
},
# expected_status_code
200,
# expected_response_data
{
"outcome": AnySupersetOf({
"completed": True,
"award": {
"awardingOrganisationName": "Omphalos",
"awardValue": "314.15",
"startDate": "2020-10-10",
"endDate": "2020-11-20",
},
}),
},
),
),
)),
# help pytest make its printed representation of the parameter set a little more readable
ids=(lambda val: "EMPTYDCT" if val == {} else None),
)
def test_update_outcome_scenarios(
self,
other_oc_brief_based,
initial_brief_based,
other_oc_data,
initial_data,
put_values,
expected_status_code,
expected_response_data,
):
"""
        A number of arguments control the background context this test is run in and the parameters PUT to
        the endpoint. Not all of the combinations make sense together, and a caller should not expect the
        test to pass with a nonsensical combination of arguments.
:param other_oc_brief_based: whether the "other", existing Outcome should be Brief-based as opposed to
Direct Award-based
:param initial_brief_based: whether the target Outcome should initially be set up to be Brief-based as
opposed to Direct Award-based
:param other_oc_data: field values to set up the "other" Outcome with, ``None`` for no "other"
Outcome to be created
:param initial_data: field values to initially set up the target Outcome with
:param put_values: payload dictionary to be PUT to the target endpoint (without the
``outcome`` wrapper)
:param expected_status_code:
:param expected_response_data:
"""
user_id = self.setup_dummy_user(id=1, role='buyer')
self.setup_dummy_suppliers(3)
project = None
search = None
chosen_archived_service = other_archived_service = None
if not (other_oc_brief_based and initial_brief_based):
# create required objects for direct award-based Outcome
self.setup_dummy_services(3, model=ArchivedService)
project = DirectAwardProject(
name="Lambay Island",
users=[User.query.get(user_id)],
)
db.session.add(project)
search = DirectAwardSearch(
project=project,
created_by=user_id,
active=True,
search_url="http://nothing.nowhere",
)
db.session.add(search)
for archived_service in db.session.query(ArchivedService).filter(
ArchivedService.service_id.in_(("2000000000", "2000000001",))
).all():
search.archived_services.append(archived_service)
chosen_archived_service, other_archived_service = search.archived_services[:2]
# else skip creating these to save time
brief = None
chosen_brief_response = other_brief_response = None
if other_oc_brief_based or initial_brief_based:
# create required objects for brief-based Outcome
brief = self.setup_dummy_brief(status="closed", user_id=user_id, data={})
chosen_brief_response, other_brief_response = (BriefResponse(
brief=brief,
supplier_id=i,
submitted_at=datetime.datetime.utcnow(),
data={},
) for i in (1, 2,))
db.session.add(chosen_brief_response)
db.session.add(other_brief_response)
# else skip creating these to save time
other_outcome = None
if other_oc_data is not None:
# create "other" Outcome for our target one to potentially clash with
other_outcome = Outcome(
**({"brief": brief} if other_oc_brief_based else {"direct_award_project": project}),
**({
"result": other_oc_data.get("result", "awarded"),
**({
"brief_response": other_brief_response,
} if other_oc_brief_based else {
"direct_award_search": search,
"direct_award_archived_service": other_archived_service,
}),
} if other_oc_data.get("result", "awarded") == "awarded" else {"result": other_oc_data["result"]}),
**{k: v for k, v in (other_oc_data or {}).items() if k not in ("completed_at", "result",)},
)
if "completed_at" in other_oc_data:
other_outcome.completed_at = other_oc_data["completed_at"]
db.session.add(other_outcome)
# create our target Outcome in its initial state
outcome = Outcome(
**({"brief": brief} if initial_brief_based else {"direct_award_project": project}),
**({
"result": initial_data.get("result", "awarded"),
**({
"brief_response": chosen_brief_response,
} if initial_brief_based else {
"direct_award_search": search,
"direct_award_archived_service": chosen_archived_service,
}),
} if initial_data.get("result", "awarded") == "awarded" else {"result": initial_data["result"]}),
**{k: v for k, v in (initial_data or {}).items() if k not in ("completed_at", "result",)},
)
if "completed_at" in initial_data:
# can only set completed_at after other fields have been set
outcome.completed_at = initial_data["completed_at"]
db.session.add(outcome)
# must assign ids before we can lock project
db.session.flush()
if project:
project.locked_at = datetime.datetime.now()
# make a concrete note of these so we don't have to fetch them back from the database after the request,
    # potentially getting back values which have been inadvertently changed
outcome_external_id = outcome.external_id
project_external_id = project and project.external_id
search_id = search and search.id
chosen_archived_service_id = chosen_archived_service and chosen_archived_service.id
chosen_archived_service_service_id = chosen_archived_service and chosen_archived_service.service_id
brief_id = brief and brief.id
chosen_brief_response_id = chosen_brief_response and chosen_brief_response.id
audit_event_count = AuditEvent.query.count()
db.session.commit()
        # keep a nice concrete representation for later comparison
outcome_serialization_before = outcome.serialize()
res = self.client.put(
f"/outcomes/{outcome.external_id}",
data=json.dumps({
"updated_by": "lord.talbot@example.com",
"outcome": put_values,
}),
content_type="application/json",
)
assert res.status_code == expected_status_code
response_data = json.loads(res.get_data())
assert response_data == expected_response_data
# allow these to be re-used in this session, "refreshed"
db.session.add_all(x for x in (outcome, project, search, chosen_archived_service,) if x is not None)
db.session.expire_all()
if res.status_code != 200:
# assert change wasn't made, audit event wasn't added
assert outcome.serialize() == outcome_serialization_before
assert AuditEvent.query.count() == audit_event_count
else:
# an additional check of values we should be able to figure out the "correct" values for
assert response_data == {
"outcome": {
"id": outcome_external_id,
"result": initial_data.get("result", "awarded"),
"completed": (
bool(outcome_serialization_before.get("completedAt"))
or put_values.get("completed") is True
),
"completedAt": (
outcome_serialization_before.get("completedAt")
or (
AnyStringMatching(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z")
if put_values.get("completed") else None
)
),
**({
"resultOfFurtherCompetition": {
"brief": {
"id": brief_id,
},
**({
"briefResponse": {
"id": chosen_brief_response_id,
},
} if initial_data.get("result", "awarded") == "awarded" else {}),
},
} if initial_brief_based else {
"resultOfDirectAward": {
"project": {
"id": project_external_id,
},
**({
"search": {
"id": search_id,
},
"archivedService": {
"id": chosen_archived_service_id,
"service": {
"id": chosen_archived_service_service_id,
},
},
} if initial_data.get("result", "awarded") == "awarded" else {})
},
}),
**({"award": AnySupersetOf({})} if initial_data.get("result", "awarded") == "awarded" else {}),
}
}
# check changes actually got committed
assert response_data == {
"outcome": outcome.serialize(),
}
# check audit event(s) were saved
expect_complete_audit_event = put_values.get("completed") is True and not initial_data.get("completed_at")
n_expected_new_audit_events = 2 if expect_complete_audit_event else 1
assert AuditEvent.query.count() == audit_event_count + n_expected_new_audit_events
# grab those most recent (1 or) 2 audit events from the db, re-sorting them to be in a predictable order -
# we don't care whether the complete_outcome or update_outcome comes out of the db first
audit_events = sorted(
db.session.query(AuditEvent).order_by(
desc(AuditEvent.created_at),
desc(AuditEvent.id),
)[:n_expected_new_audit_events],
key=lambda ae: ae.type,
reverse=True,
)
assert audit_events[0].type == "update_outcome"
assert audit_events[0].object is outcome
assert audit_events[0].acknowledged is False
assert audit_events[0].acknowledged_at is None
assert not audit_events[0].acknowledged_by
assert audit_events[0].user == "lord.talbot@example.com"
assert audit_events[0].data == put_values
if expect_complete_audit_event:
assert audit_events[1].type == "complete_outcome"
assert audit_events[1].created_at == audit_events[0].created_at == outcome.completed_at
assert audit_events[1].object is outcome
assert audit_events[1].acknowledged is False
assert audit_events[1].acknowledged_at is None
assert not audit_events[1].acknowledged_by
assert audit_events[1].user == "lord.talbot@example.com"
assert audit_events[1].data == {}
def test_nonexistent_outcome(self):
res = self.client.put(
f"/outcomes/314159",
data=json.dumps({
"updated_by": "lord.talbot@example.com",
"outcome": {
"completed": True,
},
}),
content_type="application/json",
)
assert res.status_code == 404
assert json.loads(res.get_data()) == {
"error": "Outcome 314159 not found",
}
class TestListOutcomes(BaseApplicationTest, FixtureMixin):
@pytest.mark.parametrize("query_string", ("", "completed=true", "completed=false",))
def test_list_outcomes_empty(self, query_string):
res = self.client.get(
f"/outcomes?{query_string}",
)
assert res.status_code == 200
assert json.loads(res.get_data()) == {
"links": {"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", query_string, "",))},
"meta": {
"total": 0,
},
"outcomes": [],
}
def setup_outcomes(self):
user_id = self.setup_dummy_user(id=1, role='buyer')
self.setup_dummy_suppliers(5)
# create required objects for direct award-based Outcome
self.setup_dummy_services(5, model=ArchivedService)
#
# create required objects for direct-award-related Outcomes
#
projects = tuple(
DirectAwardProject(
name=name,
users=[User.query.get(user_id)],
) for name in ("alumno optimo", "palmam ferenti", "vere dignum et iustum est",)
)
db.session.add_all(projects)
searches = tuple(
DirectAwardSearch(
project=project,
created_by=user_id,
active=True,
search_url="http://nothing.nowhere",
) for project in projects
)
db.session.add_all(searches)
for i, search in enumerate(searches):
for archived_service in db.session.query(ArchivedService).filter(
ArchivedService.service_id.in_([str(j) for j in range(2000000000 + i, 2000000000 + i + 3)])
).all():
search.archived_services.append(archived_service)
#
# create required objects for Brief-related Outcomes
#
briefs = tuple(
self.setup_dummy_brief(status="closed", user_id=user_id, data={})
for _ in range(4)
)
db.session.add_all(briefs)
# increasingly many BriefResponses for each Brief in `briefs`
brief_responses = tuple(BriefResponse(
brief=brief,
supplier_id=j,
submitted_at=datetime.datetime.utcnow(),
data={},
        ) for i, brief in enumerate(briefs) for j in range(i))
db.session.add_all(brief_responses)
outcomes = (
Outcome(
external_id=100000000,
direct_award_project=searches[0].project,
direct_award_search=searches[0],
direct_award_archived_service=searches[0].archived_services[0],
result="awarded",
start_date=datetime.date(2006, 2, 2),
end_date=datetime.date(2006, 3, 3),
awarding_organisation_name="Omnium Gatherum",
award_value=81396,
completed_at=datetime.datetime(2005, 10, 10, 10, 10, 10),
),
Outcome(
external_id=100000005,
direct_award_project=searches[0].project,
direct_award_search=searches[0],
direct_award_archived_service=searches[0].archived_services[1],
result="awarded",
start_date=datetime.date(2006, 4, 4),
awarding_organisation_name="Nisus Formativus",
),
Outcome(
external_id=100000002,
direct_award_project=searches[0].project,
result="none-suitable",
),
Outcome(
external_id=100000011,
direct_award_project=searches[1].project,
result="none-suitable",
),
Outcome(
external_id=100000004,
direct_award_project=searches[2].project,
result="cancelled",
completed_at=datetime.datetime(2005, 10, 9, 9, 9, 9),
),
Outcome(
external_id=100000001,
brief=briefs[0],
result="cancelled",
completed_at=datetime.datetime(2005, 5, 5, 5, 5, 5),
),
Outcome(
external_id=100000008,
brief=briefs[0],
result="cancelled",
),
Outcome(
external_id=100000012,
brief=briefs[1],
brief_response=briefs[1].brief_responses[0],
result="awarded",
start_date=datetime.date(2010, 1, 1),
end_date=datetime.date(2011, 8, 8),
awarding_organisation_name="Viridum Toxicum",
award_value=81396,
completed_at=datetime.datetime(2005, 11, 11, 11, 11, 11),
),
Outcome(
external_id=100000006,
brief=briefs[1],
brief_response=briefs[1].brief_responses[0],
result="awarded",
award_value=83300,
),
Outcome(
external_id=100000009,
brief=briefs[2],
result="none-suitable",
completed_at=datetime.datetime(2005, 10, 10, 10, 11, 11),
),
Outcome(
external_id=100000013,
brief=briefs[2],
brief_response=briefs[2].brief_responses[0],
result="awarded",
start_date=datetime.date(2011, 1, 1),
end_date=datetime.date(2011, 1, 2),
award_value=3072,
),
Outcome(
external_id=100000003,
brief=briefs[2],
brief_response=briefs[2].brief_responses[1],
result="awarded",
),
Outcome(
external_id=100000007,
brief=briefs[3],
result="none-suitable",
),
Outcome(
external_id=100000010,
brief=briefs[3],
brief_response=briefs[3].brief_responses[0],
result="awarded",
start_date=datetime.date(2006, 1, 1),
end_date=datetime.date(2008, 1, 1),
awarding_organisation_name="Lacus Mortis",
award_value=4386035,
completed_at=datetime.datetime(2006, 1, 1, 1, 1, 1),
),
)
db.session.add_all(outcomes)
db.session.commit()
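    # Ordering note, inferred from the expectations below: completed outcomes are
    # listed first (in completed_at order), followed by incomplete ones in
    # external_id order.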
@pytest.mark.parametrize("query_string,expected_response_data", (
("", {
"links": {"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "", "",))},
"meta": {
"total": 14,
},
"outcomes": [
AnySupersetOf({"id": 100000001}),
AnySupersetOf({"id": 100000004}),
AnySupersetOf({"id": 100000000}),
AnySupersetOf({"id": 100000009}),
AnySupersetOf({"id": 100000012}),
AnySupersetOf({"id": 100000010}),
AnySupersetOf({"id": 100000002}),
AnySupersetOf({"id": 100000003}),
AnySupersetOf({"id": 100000005}),
AnySupersetOf({"id": 100000006}),
AnySupersetOf({"id": 100000007}),
AnySupersetOf({"id": 100000008}),
AnySupersetOf({"id": 100000011}),
AnySupersetOf({"id": 100000013}),
],
}),
("completed=true", {
"links": {
"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "completed=true", "",)),
},
"meta": {
"total": 6,
},
"outcomes": [
AnySupersetOf({"id": 100000001}),
AnySupersetOf({"id": 100000004}),
AnySupersetOf({"id": 100000000}),
AnySupersetOf({"id": 100000009}),
AnySupersetOf({"id": 100000012}),
AnySupersetOf({"id": 100000010}),
],
}),
("completed=false", {
"links": {
"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "completed=false", "",)),
},
"meta": {
"total": 8,
},
"outcomes": [
AnySupersetOf({"id": 100000002}),
AnySupersetOf({"id": 100000003}),
AnySupersetOf({"id": 100000005}),
AnySupersetOf({"id": 100000006}),
AnySupersetOf({"id": 100000007}),
AnySupersetOf({"id": 100000008}),
AnySupersetOf({"id": 100000011}),
AnySupersetOf({"id": 100000013}),
],
}),
))
def test_list_outcomes(self, query_string, expected_response_data):
self.setup_outcomes()
res = self.client.get(
f"/outcomes?{query_string}",
)
assert res.status_code == 200
response_data = json.loads(res.get_data())
# allow parameter to check its coarse constraints
assert response_data == expected_response_data
# now we'll follow that up by checking that outcomes with a particular id match their correct serialization
assert response_data["outcomes"] == [
Outcome.query.filter(Outcome.external_id == outcome_dict["id"]).one().serialize()
for outcome_dict in response_data["outcomes"]
]
def test_list_outcomes_paging(self):
self.setup_outcomes()
current_app.config["DM_API_OUTCOMES_PAGE_SIZE"] = 3
res = self.client.get(
f"/outcomes?page=2",
)
assert res.status_code == 200
response_data = json.loads(res.get_data())
assert response_data == {
"links": {
"next": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=3", "",)),
"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=2", "",)),
"prev": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=1", "",)),
"last": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=5", "",)),
},
"meta": {
"total": 14,
},
"outcomes": [
Outcome.query.filter(Outcome.external_id == expected_id).one().serialize()
for expected_id in (100000009, 100000012, 100000010,)
],
}
|
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI specific views for Organization Mentors.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>'
]
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.logic.models import user as user_logic
from soc.views import out_of_band
from soc.views.helper import decorators
from soc.views.helper import lists
from soc.views.models import mentor
from soc.modules.gci.logic.models import mentor as gci_mentor_logic
from soc.modules.gci.logic.models import organization as gci_org_logic
from soc.modules.gci.logic.models import org_admin as gci_org_admin_logic
from soc.modules.gci.logic.models import program as gci_program_logic
from soc.modules.gci.logic.models import student as gci_student_logic
from soc.modules.gci.logic.models import task as gci_task_logic
from soc.modules.gci.models import task as gci_task_model
from soc.modules.gci.views.helper import access as gci_access
from soc.modules.gci.views.models import organization as gci_org_view
from soc.modules.gci.views.models import task as gci_task_view
import soc.modules.gci.logic.models.mentor
class View(mentor.View):
"""View methods for the GCI Mentor model.
"""
DEF_NO_TASKS_MSG = ugettext(
'There are no tasks affiliated to you.')
DEF_MENTOR_TASKS_MSG_FMT = ugettext('Tasks I am mentoring for %s.')
def __init__(self, params=None):
"""Defines the fields and methods required for the mentor View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = gci_access.GCIChecker(params)
rights['create'] = ['checkIsDeveloper']
rights['edit'] = [('checkIsMyActiveRole', gci_mentor_logic.logic)]
rights['delete'] = ['checkIsDeveloper']
rights['invite'] = [('checkHasRoleForScope',
gci_org_admin_logic.logic)]
rights['accept_invite'] = [
('checkIsMyRequestWithStatus', [['group_accepted']]),
('checkIsNotStudentForProgramOfOrgInRequest',
[gci_org_logic.logic, gci_student_logic.logic])]
rights['request'] = [
('checkIsNotStudentForProgramOfOrg',
[gci_org_logic.logic, gci_student_logic.logic]),
('checkCanMakeRequestToGroup', gci_org_logic.logic)]
rights['process_request'] = [
('checkCanProcessRequest', [[gci_org_admin_logic.logic]])]
rights['manage'] = [
('checkIsAllowedToManageRole', [gci_mentor_logic.logic,
gci_org_admin_logic.logic])]
rights['list_mentor_tasks'] = [
('checkCanOpenTaskList', [gci_mentor_logic.logic, 'gci/mentor']),
('checkIsAfterEvent', ['accepted_organization_announced_deadline',
'__all__', gci_program_logic.logic])]
new_params = {}
new_params['logic'] = soc.modules.gci.logic.models.mentor.logic
new_params['group_logic'] = gci_org_logic.logic
new_params['group_view'] = gci_org_view.view
new_params['rights'] = rights
new_params['scope_view'] = gci_org_view
new_params['name'] = "GCI Mentor"
new_params['module_name'] = "mentor"
new_params['sidebar_grouping'] = 'Organizations'
new_params['module_package'] = 'soc.modules.gci.views.models'
new_params['url_name'] = 'gci/mentor'
new_params['role'] = 'gci/mentor'
patterns = []
patterns += [
(r'^%(url_name)s/(?P<access_type>list_mentor_tasks)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.list_mentor_tasks',
'List Mentor tasks')]
new_params['extra_django_patterns'] = patterns
params = dicts.merge(params, new_params, sub_merge=True)
super(View, self).__init__(params=params)
def getListMentorTasksData(self, request, params, filter):
"""Returns the list data for Organization Tasks list.
Args:
request: HTTPRequest object
params: params of the task entity for the list
filter: properties on which the tasks must be listed
"""
idx = lists.getListIndex(request)
# default list settings
visibility = 'public'
if idx == 0:
all_d = gci_task_model.TaskDifficultyTag.all().fetch(100)
all_t = gci_task_model.TaskTypeTag.all().fetch(100)
args = [all_d, all_t]
contents = lists.getListData(request, params, filter,
visibility=visibility, args=args)
else:
return lists.getErrorResponse(request, "idx not valid")
return lists.getResponse(request, contents)
@decorators.merge_params
@decorators.check_access
def listMentorTasks(self, request, access_type, page_name=None,
params=None, **kwargs):
"""Displays a list of all tasks for a given student.
See base.View.list() for more details.
"""
entity = params['logic'].getFromKeyFieldsOr404(kwargs)
# obtain program entity based on request params
program = entity.program
user_account = user_logic.logic.getCurrentUser()
filter = {
'user': user_account,
'program': program,
'status': 'active'
}
list_params = gci_task_view.view.getParams().copy()
list_params['list_description'] = self.DEF_MENTOR_TASKS_MSG_FMT % (
program.name)
filter = {
'program': program,
'mentors': [entity],
'status': ['Unapproved', 'Unpublished', 'Open', 'Reopened',
'ClaimRequested', 'Claimed', 'ActionNeeded', 'Closed',
'AwaitingRegistration', 'NeedsWork', 'NeedsReview']
}
if lists.isDataRequest(request):
return self.getListMentorTasksData(request, list_params,
filter)
tasks = gci_task_logic.logic.getForFields(filter=filter, unique=True)
contents = []
if tasks:
order = ['modified_on']
tasks_list = lists.getListGenerator(request, list_params,
order=order, idx=0)
contents.append(tasks_list)
if contents:
return self._list(request, list_params, contents, page_name)
else:
raise out_of_band.Error(self.DEF_NO_TASKS_MSG)
view = View()
accept_invite = decorators.view(view.acceptInvite)
admin = decorators.view(view.admin)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
invite = decorators.view(view.invite)
list = decorators.view(view.list)
list_mentor_tasks = decorators.view(view.listMentorTasks)
manage = decorators.view(view.manage)
process_request = decorators.view(view.processRequest)
role_request = decorators.view(view.request)
public = decorators.view(view.public)
export = decorators.view(view.export)
pick = decorators.view(view.pick)
|
|
"""
Tests for dataset creation
"""
import random
import math
import unittest
import os
import numpy as np
import deepchem as dc
try:
import torch # noqa
PYTORCH_IMPORT_FAILED = False
except ImportError:
PYTORCH_IMPORT_FAILED = True
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
return loader.create_dataset(input_file)
def load_multitask_data():
"""Load example multitask data."""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = [
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
]
input_file = os.path.join(current_dir,
"../../models/tests/multitask_example.csv")
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
return loader.create_dataset(input_file)
class TestTransformer(dc.trans.Transformer):
def transform_array(self, X, y, w, ids):
return (2 * X, 1.5 * y, w, ids)
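# TestTransformer doubles X and scales y by 1.5; the tests below use it to check
# that transform() returns a transformed copy while leaving the original
# dataset's arrays untouched.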
def test_transform_disk():
"""Test that the transform() method works for DiskDatasets."""
dataset = load_solubility_data()
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
for parallel in (True, False):
transformed = dataset.transform(transformer, parallel=parallel)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_sparsify_and_densify():
"""Test that sparsify and densify work as inverses."""
# Test on identity matrix
num_samples = 10
num_features = num_samples
X = np.eye(num_samples)
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Generate random sparse features dataset
np.random.seed(123)
p = .05
X = np.random.binomial(1, p, size=(num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Test edge case with array of all zeros
X = np.zeros((num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features():
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
  # Test case where 2 * n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches():
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
  # Test case where 2 * n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names():
"""Test that get_task_names returns correct task_names"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted([
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
])
def test_get_data_shape():
"""Test that get_data_shape returns currect data shape"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len():
"""Test that len(dataset) works."""
solubility_dataset = load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard():
"""Test that resharding the dataset works."""
solubility_dataset = load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_complete_shuffle():
shard_sizes = [1, 2, 3, 4, 5]
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
res = dataset.complete_shuffle()
# approx 1/15! chance of equality
np.testing.assert_equal(np.any(np.not_equal(dataset.X, res.X)), True)
  np.testing.assert_equal(np.any(np.not_equal(dataset.y, res.y)), True)
  np.testing.assert_equal(np.any(np.not_equal(dataset.w, res.w)), True)
np.testing.assert_equal(np.any(np.not_equal(dataset.ids, res.ids)), True)
np.testing.assert_array_equal(
np.sort(dataset.X, axis=0), np.sort(res.X, axis=0))
np.testing.assert_array_equal(
np.sort(dataset.y, axis=0), np.sort(res.y, axis=0))
np.testing.assert_array_equal(
np.sort(dataset.w, axis=0), np.sort(res.w, axis=0))
np.testing.assert_array_equal(np.sort(dataset.ids), np.sort(res.ids))
def test_iterbatches():
"""Test that iterating over batches of data works."""
solubility_dataset = load_solubility_data()
batch_size = 2
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
for (X_b, y_b, w_b, ids_b) in solubility_dataset.iterbatches(batch_size):
assert X_b.shape == (batch_size,) + data_shape
assert y_b.shape == (batch_size,) + (len(tasks),)
assert w_b.shape == (batch_size,) + (len(tasks),)
assert ids_b.shape == (batch_size,)
def test_itersamples_numpy():
"""Test that iterating over samples in a NumpyDataset works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
for i, (sx, sy, sw, sid) in enumerate(dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_itersamples_disk():
"""Test that iterating over samples in a DiskDataset works."""
solubility_dataset = load_solubility_data()
X = solubility_dataset.X
y = solubility_dataset.y
w = solubility_dataset.w
ids = solubility_dataset.ids
for i, (sx, sy, sw, sid) in enumerate(solubility_dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_transform_numpy():
"""Test that the transform() method works for NumpyDatasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
transformed = dataset.transform(transformer)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_to_numpy():
"""Test that transformation to numpy arrays is sensible."""
solubility_dataset = load_solubility_data()
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
N_samples = len(solubility_dataset)
N_tasks = len(tasks)
assert X.shape == (N_samples,) + data_shape
assert y.shape == (N_samples, N_tasks)
assert w.shape == (N_samples, N_tasks)
assert ids.shape == (N_samples,)
def test_consistent_ordering():
"""Test that ordering of labels is consistent over time."""
solubility_dataset = load_solubility_data()
ids1 = solubility_dataset.ids
ids2 = solubility_dataset.ids
assert np.array_equal(ids1, ids2)
def test_get_statistics():
"""Test statistics computation of this dataset."""
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
X_means, y_means = np.mean(X, axis=0), np.mean(y, axis=0)
X_stds, y_stds = np.std(X, axis=0), np.std(y, axis=0)
comp_X_means, comp_X_stds, comp_y_means, comp_y_stds = \
solubility_dataset.get_statistics()
np.testing.assert_allclose(comp_X_means, X_means)
np.testing.assert_allclose(comp_y_means, y_means)
np.testing.assert_allclose(comp_X_stds, X_stds)
np.testing.assert_allclose(comp_y_stds, y_stds)
def test_disk_iterate_batch_size():
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
batch_sizes = []
for X, y, _, _ in solubility_dataset.iterbatches(
3, epochs=2, pad_batches=False, deterministic=True):
batch_sizes.append(len(X))
assert [3, 3, 3, 1, 3, 3, 3, 1] == batch_sizes
def test_disk_pad_batches():
shard_sizes = [21, 11, 41, 21, 51]
batch_size = 10
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_ws = np.concatenate(all_ws, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=True, deterministic=True)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
total_size = sum(shard_sizes)
assert bidx == math.ceil(total_size / batch_size) - 1
expected_batches = math.ceil(total_size / batch_size) * batch_size
assert len(test_Xs) == expected_batches
assert len(test_ys) == expected_batches
assert len(test_ws) == expected_batches
assert len(test_ids) == expected_batches
np.testing.assert_array_equal(all_Xs, test_Xs[:total_size, :])
np.testing.assert_array_equal(all_ys, test_ys[:total_size, :])
np.testing.assert_array_equal(all_ws, test_ws[:total_size, :])
np.testing.assert_array_equal(all_ids, test_ids[:total_size])
def test_disk_iterate_y_w_None():
shard_sizes = [21, 11, 41, 21, 51]
batch_size = 10
all_Xs, all_ids = [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ids.append(ids_b)
yield X_b, None, None, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
test_Xs, test_ids = [], []
for bidx, (a, _, _, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=True, deterministic=True)):
test_Xs.append(a)
test_ids.append(d)
test_Xs = np.concatenate(test_Xs, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
total_size = sum(shard_sizes)
assert bidx == math.ceil(total_size / batch_size) - 1
expected_batches = math.ceil(total_size / batch_size) * batch_size
assert len(test_Xs) == expected_batches
assert len(test_ids) == expected_batches
np.testing.assert_array_equal(all_Xs, test_Xs[:total_size, :])
np.testing.assert_array_equal(all_ids, test_ids[:total_size])
def test_disk_iterate_batch():
all_batch_sizes = [None, 32, 17, 11]
all_shard_sizes = [[7, 3, 12, 4, 5], [1, 1, 1, 1, 1], [31, 31, 31, 31, 31],
[21, 11, 41, 21, 51]]
for idx in range(25):
shard_length = random.randint(1, 32)
shard_sizes = []
for _ in range(shard_length):
shard_sizes.append(random.randint(1, 128))
all_shard_sizes.append(shard_sizes)
if idx == 0:
      # Special case: batch_size=None should yield one full shard per batch
all_batch_sizes.append(None)
else:
all_batch_sizes.append(random.randint(1, 256))
for shard_sizes, batch_size in zip(all_shard_sizes, all_batch_sizes):
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_ws = np.concatenate(all_ws, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
total_size = sum(shard_sizes)
assert dataset.X.shape[0] == total_size
# deterministic
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=False, deterministic=True)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
if batch_size is None:
for idx, (tx, ty, tw, tids) in enumerate(
zip(test_Xs, test_ys, test_ws, test_ids)):
assert len(tx) == shard_sizes[idx]
assert len(ty) == shard_sizes[idx]
assert len(tw) == shard_sizes[idx]
assert len(tids) == shard_sizes[idx]
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
if batch_size is None:
assert bidx == len(shard_sizes) - 1
else:
assert bidx == math.ceil(total_size / batch_size) - 1
np.testing.assert_array_equal(all_Xs, test_Xs)
np.testing.assert_array_equal(all_ys, test_ys)
np.testing.assert_array_equal(all_ws, test_ws)
np.testing.assert_array_equal(all_ids, test_ids)
# non-deterministic
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=False, deterministic=False)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
    # We don't know the order in which the shards are iterated.
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
if batch_size is None:
assert bidx == len(shard_sizes) - 1
else:
assert bidx == math.ceil(total_size / batch_size) - 1
np.testing.assert_array_equal(
np.sort(all_Xs, axis=0), np.sort(test_Xs, axis=0))
np.testing.assert_array_equal(
np.sort(all_ys, axis=0), np.sort(test_ys, axis=0))
np.testing.assert_array_equal(
np.sort(all_ws, axis=0), np.sort(test_ws, axis=0))
np.testing.assert_array_equal(
np.sort(all_ids, axis=0), np.sort(test_ids, axis=0))
def test_merge():
"""Test that dataset merge works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
num_datasets = 4
datasets = []
for i in range(num_datasets):
Xi = np.random.rand(num_datapoints, num_features)
yi = np.random.randint(2, size=(num_datapoints, num_tasks))
wi = np.ones((num_datapoints, num_tasks))
idsi = np.array(["id"] * num_datapoints)
dataseti = dc.data.DiskDataset.from_numpy(Xi, yi, wi, idsi)
datasets.append(dataseti)
new_data = dc.data.datasets.DiskDataset.merge(datasets)
# Check that we have all the data in
assert new_data.X.shape == (num_datapoints * num_datasets, num_features)
assert new_data.y.shape == (num_datapoints * num_datasets, num_tasks)
assert len(new_data.tasks) == len(datasets[0].tasks)
def test_make_tf_dataset():
"""Test creating a Tensorflow Iterator from a Dataset."""
X = np.random.random((100, 5))
y = np.random.random((100, 1))
dataset = dc.data.NumpyDataset(X, y)
iterator = dataset.make_tf_dataset(
batch_size=10, epochs=2, deterministic=True)
for i, (batch_X, batch_y, batch_w) in enumerate(iterator):
offset = (i % 10) * 10
np.testing.assert_array_equal(X[offset:offset + 10, :], batch_X)
np.testing.assert_array_equal(y[offset:offset + 10, :], batch_y)
np.testing.assert_array_equal(np.ones((10, 1)), batch_w)
assert i == 19
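# Shared helper for the PyTorch tests below: it checks deterministic ordering,
# shuffled iteration over two epochs, batched iteration, and loading through
# torch.utils.data.DataLoader with multiple workers.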
def _validate_pytorch_dataset(dataset):
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
n_samples = X.shape[0]
# Test iterating in order.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=True)
for i, (iter_X, iter_y, iter_w, iter_id) in enumerate(ds):
j = i % n_samples
np.testing.assert_array_equal(X[j, :], iter_X)
np.testing.assert_array_equal(y[j, :], iter_y)
np.testing.assert_array_equal(w[j, :], iter_w)
assert ids[j] == iter_id
assert i == 2 * n_samples - 1
# Test iterating out of order.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False)
id_to_index = dict((id, i) for i, id in enumerate(ids))
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in ds:
j = id_to_index[iter_id]
np.testing.assert_array_equal(X[j, :], iter_X)
np.testing.assert_array_equal(y[j, :], iter_y)
np.testing.assert_array_equal(w[j, :], iter_w)
id_count[iter_id] += 1
assert all(id_count[id] == 2 for id in ids)
# Test iterating in batches.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False, batch_size=7)
id_to_index = dict((id, i) for i, id in enumerate(ids))
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in ds:
size = len(iter_id)
assert size <= 7
for i in range(size):
j = id_to_index[iter_id[i]]
np.testing.assert_array_equal(X[j, :], iter_X[i])
np.testing.assert_array_equal(y[j, :], iter_y[i])
np.testing.assert_array_equal(w[j, :], iter_w[i])
id_count[iter_id[i]] += 1
assert all(id_count[id] == 2 for id in ids)
# Test iterating with multiple workers.
import torch # noqa
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False)
loader = torch.utils.data.DataLoader(ds, num_workers=3)
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in loader:
j = id_to_index[iter_id[0]]
np.testing.assert_array_equal(X[j, :], iter_X[0])
np.testing.assert_array_equal(y[j, :], iter_y[0])
np.testing.assert_array_equal(w[j, :], iter_w[0])
id_count[iter_id[0]] += 1
assert all(id_count[id] == 2 for id in ids)
def test_dataframe():
"""Test converting between Datasets and DataFrames."""
dataset = load_solubility_data()
# A round trip from Dataset to DataFrame to Dataset should produce identical arrays.
df = dataset.to_dataframe()
dataset2 = dc.data.Dataset.from_dataframe(df)
np.testing.assert_array_equal(dataset.X, dataset2.X)
np.testing.assert_array_equal(dataset.y, dataset2.y)
np.testing.assert_array_equal(dataset.w, dataset2.w)
np.testing.assert_array_equal(dataset.ids, dataset2.ids)
# Try specifying particular columns.
dataset3 = dc.data.Dataset.from_dataframe(
df, X=['X2', 'X4'], y='w', w=['y', 'X1'])
np.testing.assert_array_equal(dataset.X[:, (1, 3)], dataset3.X)
np.testing.assert_array_equal(dataset.w, dataset3.y)
np.testing.assert_array_equal(
np.stack([dataset.y[:, 0], dataset.X[:, 0]], axis=1), dataset3.w)
def test_to_str():
"""Tests to string representation of Dataset."""
dataset = dc.data.NumpyDataset(
X=np.random.rand(5, 3), y=np.random.rand(5,), ids=np.arange(5))
ref_str = '<NumpyDataset X.shape: (5, 3), y.shape: (5,), w.shape: (5,), ids: [0 1 2 3 4], task_names: [0]>'
assert str(dataset) == ref_str
# Test id shrinkage
dc.utils.set_print_threshold(10)
dataset = dc.data.NumpyDataset(
X=np.random.rand(50, 3), y=np.random.rand(50,), ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50,), w.shape: (50,), ids: [0 1 2 ... 47 48 49], task_names: [0]>'
assert str(dataset) == ref_str
# Test task shrinkage
dataset = dc.data.NumpyDataset(
X=np.random.rand(50, 3), y=np.random.rand(50, 20), ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50, 20), w.shape: (50, 1), ids: [0 1 2 ... 47 48 49], task_names: [ 0 1 2 ... 17 18 19]>'
assert str(dataset) == ref_str
# Test max print size
dc.utils.set_max_print_size(25)
dataset = dc.data.NumpyDataset(
X=np.random.rand(50, 3), y=np.random.rand(50,), ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50,), w.shape: (50,), task_names: [0]>'
assert str(dataset) == ref_str
class TestDatasets(unittest.TestCase):
"""
Test basic top-level API for dataset objects.
"""
def test_numpy_iterate_batch_size(self):
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = dc.data.NumpyDataset.from_DiskDataset(
solubility_dataset)
batch_sizes = []
for X, y, _, _ in solubility_dataset.iterbatches(
3, epochs=2, pad_batches=False, deterministic=True):
batch_sizes.append(len(X))
self.assertEqual([3, 3, 3, 1, 3, 3, 3, 1], batch_sizes)
@unittest.skipIf(PYTORCH_IMPORT_FAILED, 'PyTorch is not installed')
def test_make_pytorch_dataset_from_numpy(self):
"""Test creating a PyTorch Dataset from a NumpyDataset."""
X = np.random.random((100, 5))
y = np.random.random((100, 1))
ids = [str(i) for i in range(100)]
dataset = dc.data.NumpyDataset(X, y, ids=ids)
_validate_pytorch_dataset(dataset)
@unittest.skipIf(PYTORCH_IMPORT_FAILED, 'PyTorch is not installed')
def test_make_pytorch_dataset_from_images(self):
"""Test creating a PyTorch Dataset from an ImageDataset."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
y = np.random.random((10, 1))
ids = [str(i) for i in range(len(files))]
dataset = dc.data.ImageDataset(files, y, ids=ids)
_validate_pytorch_dataset(dataset)
@unittest.skipIf(PYTORCH_IMPORT_FAILED, 'PyTorch is not installed')
def test_make_pytorch_dataset_from_disk(self):
"""Test creating a PyTorch Dataset from a DiskDataset."""
dataset = load_solubility_data()
_validate_pytorch_dataset(dataset)
|
|
"""DHCPv4 options part5"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import srv_control
import misc
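# Every test below follows the same pattern: configure a subnet and one option
# via srv_control, start the server, have the simulated client request that
# option code in a DISCOVER, and assert that the OFFER carries the option with
# the configured value(s).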
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_nisplus_domain_name():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('nisplus-domain-name', 'nisplus-domain.com')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(64)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(64)
srv_msg.response_check_option_content(64, 'value', 'nisplus-domain.com')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_nisplus_servers():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('nisplus-servers', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(65)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(65)
srv_msg.response_check_option_content(65, 'value', '200.1.1.2')
srv_msg.response_check_option_content(65, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_mobile_ip_home_agent():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('mobile-ip-home-agent', '166.1.1.1,177.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(68)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(68)
srv_msg.response_check_option_content(68, 'value', '166.1.1.1')
srv_msg.response_check_option_content(68, 'value', '177.1.1.2')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_smtp_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('smtp-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(69)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(69)
srv_msg.response_check_option_content(69, 'value', '200.1.1.2')
srv_msg.response_check_option_content(69, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_pop_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('pop-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(70)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(70)
srv_msg.response_check_option_content(70, 'value', '200.1.1.2')
srv_msg.response_check_option_content(70, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_nntp_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('nntp-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(71)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(71)
srv_msg.response_check_option_content(71, 'value', '200.1.1.2')
srv_msg.response_check_option_content(71, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_www_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('www-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(72)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(72)
srv_msg.response_check_option_content(72, 'value', '200.1.1.2')
srv_msg.response_check_option_content(72, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_finger_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('finger-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(73)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(73)
srv_msg.response_check_option_content(73, 'value', '200.1.1.2')
srv_msg.response_check_option_content(73, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_irc_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('irc-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(74)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(74)
srv_msg.response_check_option_content(74, 'value', '200.1.1.2')
srv_msg.response_check_option_content(74, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_streettalk_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('streettalk-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(75)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(75)
srv_msg.response_check_option_content(75, 'value', '200.1.1.2')
srv_msg.response_check_option_content(75, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_streettalk_directory_assistance_server():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('streettalk-directory-assistance-server', '199.1.1.1,200.1.1.2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(76)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(76)
srv_msg.response_check_option_content(76, 'value', '200.1.1.2')
srv_msg.response_check_option_content(76, 'value', '199.1.1.1')
@pytest.mark.v4
@pytest.mark.options
def test_v4_options_not_requested_options():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('routers', '100.100.100.10,50.50.50.5')
srv_control.config_srv_opt('domain-name-servers', '199.199.199.1,100.100.100.1')
# this should include fqdn option, 15
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(6)
srv_msg.response_check_option_content(6, 'value', '199.199.199.1')
srv_msg.response_check_option_content(6, 'value', '100.100.100.1')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'value', '100.100.100.10')
srv_msg.response_check_option_content(3, 'value', '50.50.50.5')
# future tests:
# vendor-class-identifier 60 binary false
# nwip-suboptions 63 binary false
# user_class 77 binary false
# authenticate 90 binary false
# domain-search 119 binary false
# vivco-suboptions 124 binary false
# vivso-suboptions 125 binary
|
|
"""The tests for the Netatmo climate platform."""
from unittest.mock import Mock, patch
import pytest
from homeassistant.components.climate import (
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
)
from homeassistant.components.netatmo import climate
from homeassistant.components.netatmo.climate import (
NA_THERM,
NA_VALVE,
PRESET_FROST_GUARD,
PRESET_SCHEDULE,
)
from homeassistant.components.netatmo.const import (
ATTR_SCHEDULE_NAME,
SERVICE_SET_SCHEDULE,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_TEMPERATURE, CONF_WEBHOOK_ID
from .common import simulate_webhook
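# These tests drive entities through Home Assistant service calls and then feed
# fake Netatmo webhook payloads via simulate_webhook, asserting the resulting
# climate entity state and attributes.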
async def test_webhook_event_handling_thermostats(hass, climate_entry):
"""Test service and webhook event handling with thermostats."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_livingroom = "climate.netatmo_livingroom"
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Schedule"
)
assert hass.states.get(climate_entity_livingroom).attributes["temperature"] == 12
# Test service setting the temperature
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_TEMPERATURE: 21},
blocking=True,
)
await hass.async_block_till_done()
# Fake webhook thermostat manual set point
response = {
"room_id": "2746182631",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2746182631",
"name": "Livingroom",
"type": "livingroom",
"therm_setpoint_mode": "manual",
"therm_setpoint_temperature": 21,
"therm_setpoint_end_time": 1612734552,
}
],
"modules": [
{"id": "12:34:56:00:01:ae", "name": "Livingroom", "type": "NATherm1"}
],
},
"mode": "manual",
"event_type": "set_point",
"temperature": 21,
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "heat"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Schedule"
)
assert hass.states.get(climate_entity_livingroom).attributes["temperature"] == 21
# Test service setting the HVAC mode to "heat"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
await hass.async_block_till_done()
# Fake webhook thermostat mode change to "Max"
response = {
"room_id": "2746182631",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2746182631",
"name": "Livingroom",
"type": "livingroom",
"therm_setpoint_mode": "max",
"therm_setpoint_end_time": 1612749189,
}
],
"modules": [
{"id": "12:34:56:00:01:ae", "name": "Livingroom", "type": "NATherm1"}
],
},
"mode": "max",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "heat"
assert hass.states.get(climate_entity_livingroom).attributes["temperature"] == 30
# Test service setting the HVAC mode to "off"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
await hass.async_block_till_done()
# Fake webhook turn thermostat off
response = {
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2746182631",
"name": "Livingroom",
"type": "livingroom",
"therm_setpoint_mode": "off",
}
],
"modules": [
{"id": "12:34:56:00:01:ae", "name": "Livingroom", "type": "NATherm1"}
],
},
"mode": "off",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "off"
# Test service setting the HVAC mode to "auto"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
await hass.async_block_till_done()
# Fake webhook thermostat mode cancel set point
response = {
"room_id": "2746182631",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2746182631",
"name": "Livingroom",
"type": "livingroom",
"therm_setpoint_mode": "home",
}
],
"modules": [
{"id": "12:34:56:00:01:ae", "name": "Livingroom", "type": "NATherm1"}
],
},
"mode": "home",
"event_type": "cancel_set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Schedule"
)
async def test_service_preset_mode_frost_guard_thermostat(hass, climate_entry):
"""Test service with frost guard preset for thermostats."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_livingroom = "climate.netatmo_livingroom"
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Schedule"
)
# Test service setting the preset mode to "frost guard"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{
ATTR_ENTITY_ID: climate_entity_livingroom,
ATTR_PRESET_MODE: PRESET_FROST_GUARD,
},
blocking=True,
)
await hass.async_block_till_done()
# Fake webhook thermostat mode change to "Frost Guard"
response = {
"event_type": "therm_mode",
"home": {"id": "91763b24c43d3e344f424e8b", "therm_mode": "hg"},
"mode": "hg",
"previous_mode": "schedule",
"push_type": "home_event_changed",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Frost Guard"
)
    # Test service setting the preset mode back to "Schedule"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{
ATTR_ENTITY_ID: climate_entity_livingroom,
ATTR_PRESET_MODE: PRESET_SCHEDULE,
},
blocking=True,
)
await hass.async_block_till_done()
# Test webhook thermostat mode change to "Schedule"
response = {
"event_type": "therm_mode",
"home": {"id": "91763b24c43d3e344f424e8b", "therm_mode": "schedule"},
"mode": "schedule",
"previous_mode": "hg",
"push_type": "home_event_changed",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Schedule"
)
async def test_service_preset_modes_thermostat(hass, climate_entry):
"""Test service with preset modes for thermostats."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_livingroom = "climate.netatmo_livingroom"
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"]
== "Schedule"
)
# Test service setting the preset mode to "away"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_PRESET_MODE: PRESET_AWAY},
blocking=True,
)
await hass.async_block_till_done()
# Fake webhook thermostat mode change to "Away"
response = {
"event_type": "therm_mode",
"home": {"id": "91763b24c43d3e344f424e8b", "therm_mode": "away"},
"mode": "away",
"previous_mode": "schedule",
"push_type": "home_event_changed",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "auto"
assert (
hass.states.get(climate_entity_livingroom).attributes["preset_mode"] == "away"
)
# Test service setting the preset mode to "boost"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_PRESET_MODE: PRESET_BOOST},
blocking=True,
)
await hass.async_block_till_done()
# Test webhook thermostat mode change to "Max"
response = {
"room_id": "2746182631",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2746182631",
"name": "Livingroom",
"type": "livingroom",
"therm_setpoint_mode": "max",
"therm_setpoint_end_time": 1612749189,
}
],
"modules": [
{"id": "12:34:56:00:01:ae", "name": "Livingroom", "type": "NATherm1"}
],
},
"mode": "max",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_livingroom).state == "heat"
assert hass.states.get(climate_entity_livingroom).attributes["temperature"] == 30
async def test_webhook_event_handling_no_data(hass, climate_entry):
"""Test service and webhook event handling with erroneous data."""
# Test webhook without home entry
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
response = {
"push_type": "home_event_changed",
}
await simulate_webhook(hass, webhook_id, response)
# Test webhook with different home id
response = {
"home_id": "3d3e344f491763b24c424e8b",
"room_id": "2746182631",
"home": {
"id": "3d3e344f491763b24c424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [],
"modules": [],
},
"mode": "home",
"event_type": "cancel_set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
# Test webhook without room entries
response = {
"room_id": "2746182631",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [],
"modules": [],
},
"mode": "home",
"event_type": "cancel_set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
async def test_service_schedule_thermostats(hass, climate_entry, caplog):
"""Test service for selecting Netatmo schedule with thermostats."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_livingroom = "climate.netatmo_livingroom"
# Test setting a valid schedule
with patch(
"pyatmo.thermostat.HomeData.switch_home_schedule"
) as mock_switch_home_schedule:
await hass.services.async_call(
"netatmo",
SERVICE_SET_SCHEDULE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_SCHEDULE_NAME: "Winter"},
blocking=True,
)
await hass.async_block_till_done()
mock_switch_home_schedule.assert_called_once_with(
home_id="91763b24c43d3e344f424e8b", schedule_id="b1b54a2f45795764f59d50d8"
)
    # Fake webhook response for the schedule change
response = {
"event_type": "schedule",
"schedule_id": "b1b54a2f45795764f59d50d8",
"previous_schedule_id": "59d32176d183948b05ab4dce",
"push_type": "home_event_changed",
}
await simulate_webhook(hass, webhook_id, response)
assert (
hass.states.get(climate_entity_livingroom).attributes["selected_schedule"]
== "Winter"
)
# Test setting an invalid schedule
with patch(
"pyatmo.thermostat.HomeData.switch_home_schedule"
) as mock_switch_home_schedule:
await hass.services.async_call(
"netatmo",
SERVICE_SET_SCHEDULE,
{ATTR_ENTITY_ID: climate_entity_livingroom, ATTR_SCHEDULE_NAME: "summer"},
blocking=True,
)
await hass.async_block_till_done()
mock_switch_home_schedule.assert_not_called()
assert "summer is not a invalid schedule" in caplog.text
async def test_service_preset_mode_already_boost_valves(hass, climate_entry):
"""Test service with boost preset for valves when already in boost mode."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_entrada = "climate.netatmo_entrada"
assert hass.states.get(climate_entity_entrada).state == "auto"
assert (
hass.states.get(climate_entity_entrada).attributes["preset_mode"]
== "Frost Guard"
)
assert hass.states.get(climate_entity_entrada).attributes["temperature"] == 7
# Test webhook valve mode change to "Max"
response = {
"room_id": "2833524037",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "max",
"therm_setpoint_end_time": 1612749189,
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "max",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
# Test service setting the preset mode to "boost"
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: climate_entity_entrada, ATTR_PRESET_MODE: PRESET_BOOST},
blocking=True,
)
await hass.async_block_till_done()
# Test webhook valve mode change to "Max"
response = {
"room_id": "2833524037",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "max",
"therm_setpoint_end_time": 1612749189,
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "max",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_entrada).state == "heat"
assert hass.states.get(climate_entity_entrada).attributes["temperature"] == 30
async def test_service_preset_mode_boost_valves(hass, climate_entry):
"""Test service with boost preset for valves."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_entrada = "climate.netatmo_entrada"
# Test service setting the preset mode to "boost"
assert hass.states.get(climate_entity_entrada).state == "auto"
assert hass.states.get(climate_entity_entrada).attributes["temperature"] == 7
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: climate_entity_entrada, ATTR_PRESET_MODE: PRESET_BOOST},
blocking=True,
)
await hass.async_block_till_done()
# Fake backend response
response = {
"room_id": "2833524037",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "max",
"therm_setpoint_end_time": 1612749189,
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "max",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_entrada).state == "heat"
assert hass.states.get(climate_entity_entrada).attributes["temperature"] == 30
async def test_service_preset_mode_invalid(hass, climate_entry, caplog):
"""Test service with invalid preset."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: "climate.netatmo_cocina", ATTR_PRESET_MODE: "invalid"},
blocking=True,
)
await hass.async_block_till_done()
assert "Preset mode 'invalid' not available" in caplog.text
async def test_valves_service_turn_off(hass, climate_entry):
"""Test service turn off for valves."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_entrada = "climate.netatmo_entrada"
# Test turning valve off
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: climate_entity_entrada},
blocking=True,
)
await hass.async_block_till_done()
# Fake backend response for valve being turned off
response = {
"room_id": "2833524037",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "off",
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "off",
"event_type": "set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_entrada).state == "off"
async def test_valves_service_turn_on(hass, climate_entry):
"""Test service turn on for valves."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_entrada = "climate.netatmo_entrada"
# Test turning valve on
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: climate_entity_entrada},
blocking=True,
)
await hass.async_block_till_done()
# Fake backend response for valve being turned on
response = {
"room_id": "2833524037",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "home",
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "home",
"event_type": "cancel_set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_entrada).state == "auto"
@pytest.mark.parametrize(
"batterylevel, module_type, expected",
[
(4101, NA_THERM, 100),
(3601, NA_THERM, 80),
(3450, NA_THERM, 65),
(3301, NA_THERM, 50),
(3001, NA_THERM, 20),
(2799, NA_THERM, 0),
(3201, NA_VALVE, 100),
(2701, NA_VALVE, 80),
(2550, NA_VALVE, 65),
(2401, NA_VALVE, 50),
(2201, NA_VALVE, 20),
(2001, NA_VALVE, 0),
],
)
async def test_interpolate(batterylevel, module_type, expected):
"""Test interpolation of battery levels depending on device type."""
assert climate.interpolate(batterylevel, module_type) == expected
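# The parametrized cases above span roughly 2800-4100 mV for NA_THERM and
# 2000-3200 mV for NA_VALVE, each mapped onto a 0-100 % battery level.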
async def test_get_all_home_ids():
"""Test extracting all home ids returned by NetAtmo API."""
# Test with backend returning no data
assert climate.get_all_home_ids(None) == []
# Test with fake data
home_data = Mock()
home_data.homes = {
"123": {"id": "123", "name": "Home 1", "modules": [], "therm_schedules": []},
"987": {"id": "987", "name": "Home 2", "modules": [], "therm_schedules": []},
}
expected = ["123", "987"]
assert climate.get_all_home_ids(home_data) == expected
async def test_webhook_home_id_mismatch(hass, climate_entry):
"""Test service turn on for valves."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_entrada = "climate.netatmo_entrada"
assert hass.states.get(climate_entity_entrada).state == "auto"
    # Fake webhook response carrying a home id that does not match the configured home
response = {
"room_id": "2833524037",
"home": {
"id": "123",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "home",
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "home",
"event_type": "cancel_set_point",
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_entrada).state == "auto"
async def test_webhook_set_point(hass, climate_entry):
"""Test service turn on for valves."""
webhook_id = climate_entry.data[CONF_WEBHOOK_ID]
climate_entity_entrada = "climate.netatmo_entrada"
    # Fake webhook response for an external set point change
response = {
"room_id": "2746182631",
"home": {
"id": "91763b24c43d3e344f424e8b",
"name": "MYHOME",
"country": "DE",
"rooms": [
{
"id": "2833524037",
"name": "Entrada",
"type": "lobby",
"therm_setpoint_mode": "home",
"therm_setpoint_temperature": 30,
}
],
"modules": [{"id": "12:34:56:00:01:ae", "name": "Entrada", "type": "NRV"}],
},
"mode": "home",
"event_type": "set_point",
"temperature": 21,
"push_type": "display_change",
}
await simulate_webhook(hass, webhook_id, response)
assert hass.states.get(climate_entity_entrada).state == "heat"
|
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from azure_common import BaseTest, arm_template
from c7n_azure.session import Session
from dateutil import tz as tzutils
from mock import patch
from c7n.testing import mock_datetime_now
from c7n.utils import local_session
class VMTest(BaseTest):
def setUp(self):
super(VMTest, self).setUp()
def test_validate_vm_schemas(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'offhour'},
{'type': 'onhour'},
{'type': 'network-interface'},
{'type': 'instance-view'}
],
'actions': [
{'type': 'poweroff'},
{'type': 'stop'},
{'type': 'start'},
{'type': 'restart'},
                {'type': 'delete'}
]
}, validate=True)
self.assertTrue(p)
@arm_template('vm.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('vm.json')
def test_find_running(self):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
fake_running_vms = [{
'resourceGroup': 'test_vm',
'name': 'test_vm'
}]
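    # The action tests below patch InstanceViewFilter.process to return
    # fake_running_vms, so the filter pipeline yields a known VM without
    # querying Azure and the patched client calls can be asserted against
    # its resourceGroup/name.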
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_stop(self, filter_mock):
with patch(self._get_vm_client_string() + '.deallocate') as stop_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'stop'}
]
})
p.run()
stop_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'])
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_poweroff(self, filter_mock):
with patch(self._get_vm_client_string() + '.power_off') as poweroff_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'poweroff'}
]
})
p.run()
poweroff_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'],
)
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_start(self, filter_mock):
with patch(self._get_vm_client_string() + '.start') as start_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'start'}
]
})
p.run()
start_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'])
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_restart(self, filter_mock):
with patch(self._get_vm_client_string() + '.restart') as restart_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'restart'}
]
})
p.run()
restart_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'])
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
@patch('c7n_azure.actions.delete.DeleteAction.process', return_value='')
def test_delete(self, delete_action_mock, filter_mock):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'delete'}
]
})
p.run()
delete_action_mock.assert_called_with(self.fake_running_vms)
@arm_template('vm.json')
def test_find_vm_with_public_ip(self):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'network-interface',
'key': 'properties.ipConfigurations[].properties.publicIPAddress.id',
'op': 'eq',
'value': 'not-null'}
],
})
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'network-interface',
'key': 'properties.ipConfigurations[].properties.publicIPAddress.id',
'op': 'eq',
'value': 'null'}
],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('vm.json')
def test_on_off_hours(self):
t = datetime.datetime.now(tzutils.gettz("pt"))
t = t.replace(year=2018, month=8, day=24, hour=18, minute=30)
with mock_datetime_now(t, datetime):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'offhour',
'default_tz': "pt",
'offhour': 18,
'tag': 'schedule'}
],
})
resources = p.run()
self.assertEqual(len(resources), 1)
t = t.replace(year=2018, month=8, day=24, hour=8, minute=30)
with mock_datetime_now(t, datetime):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'onhour',
'default_tz': "pt",
'onhour': 8,
'tag': 'schedule'}
],
})
resources = p.run()
self.assertEqual(len(resources), 1)
def _get_vm_client_string(self):
client = local_session(Session)\
.client('azure.mgmt.compute.ComputeManagementClient').virtual_machines
return client.__module__ + '.' + client.__class__.__name__
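# For reference, the dict policies exercised above correspond to the YAML that
# Cloud Custodian users would normally write. A minimal sketch (the filter and
# action values simply mirror the test fixtures above; adjust for real VMs):
#
# policies:
#   - name: stop-running-vms
#     resource: azure.vm
#     filters:
#       - type: instance-view
#         key: statuses[].code
#         op: in
#         value_type: swap
#         value: PowerState/running
#     actions:
#       - type: stop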
|
|
#!/usr/bin/env python
"""
A library for computing local K function for network-constrained data
Authors:
Andrew Winslow Andrew.Winslow@asu.edu
Myunghwa Hwang mhwang4@gmail.com
"""
import unittest
import numpy as np
import geodanet.network as pynet
import geodanet.kfuncs as pykfuncs
import geodanet.simulator as pysim
import time
import random
import platform
try:
if platform.system() == 'Darwin':
import multiprocessing
else:
multiprocessing = None
except ImportError:
multiprocessing = None
class WeightedRandomSampleGenerator(object):
"""
A generator for randomly sampling n elements from
a population group with consideration to a given set of weights
"""
def __init__(self, weights, population, n):
"""
weights: an iterable with m numeric elements
population: a numpy array with m elements
n: an integer representing sample size
"""
self.totals = np.cumsum(weights)
self.population = population
self.n = n
self.norm = self.totals[-1]
def next(self):
sample = []
for i in xrange(self.n):
throw = np.random.rand()*self.norm
sample.append(self.population[np.searchsorted(self.totals, throw)])
return sample
def __call__(self):
return self.next()
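# Illustrative sketch (not part of the original module): how the cumulative-sum
# trick above turns a uniform draw into a weighted sample. The function name and
# values below are hypothetical and only for demonstration.
def _example_weighted_sampling():
    weights = [1, 3, 6]                       # cumulative totals: [1, 4, 10]
    population = np.array(['a', 'b', 'c'])
    gen = WeightedRandomSampleGenerator(weights, population, 5)
    # 'c' should be drawn ~60% of the time, since it holds 6/10 of the weight
    return gen()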
class RandomSampleGenerator(object):
"""
A generator for randomly sampling n elements
from a population group
"""
def __init__(self, population, n):
"""
population: a numpy array with m elements
n: an integer representing sample size
"""
self.population = population
self.n = n
def next(self):
return random.sample(self.population, self.n)
def __call__(self):
return self.next()
def local_k(network, events, refs, scale_set, cache=None):
"""
Computes local K function
network: undirected network data into which reference points have been injected
events: a set of event points on the given network
(points projected into the network)
refs: a set of reference points on the given network
(points not yet projected into the network)
scale_set: a tuple (min, max, interval) defining the spatial scales to be examined
cache: optional dict of precomputed network distances, keyed by source node
"""
node2localK = {}
net_distances = {}
if cache: net_distances = cache
for node in refs:
node = node[1][0]
a_dest = network[node].keys()[0]
node_proj = (node, a_dest, 0, network[node][a_dest])
if node not in net_distances:
net_distances[node] = pynet.dijkstras(network, node, scale_set[1])
if a_dest not in net_distances:
net_distances[a_dest] = pynet.dijkstras(network, a_dest, scale_set[1])
distances = pynet.proj_distances_undirected(network, node_proj, events, scale_set[1], cache=net_distances).values()
node2localK[node] = pykfuncs.kt_values(scale_set, distances, 1)
return node2localK, net_distances
def cluster_type(obs, lower, upper):
if obs < lower: return -1
if obs > upper: return 1
return 0
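# Minimal sketch of how cluster_type() classifies an observed local K value
# against the simulation envelopes (numbers are made up for illustration):
#   cluster_type(2.0, 3.0, 9.0)   # -> -1, below the lower envelope: dispersion
#   cluster_type(5.0, 3.0, 9.0)   # ->  0, inside the envelope: no significant pattern
#   cluster_type(12.0, 3.0, 9.0)  # ->  1, above the upper envelope: clustering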
def simulate_local_k_01(args):
sims = args[0]
n = args[1]
net_file = args[2]
network = args[3]
events = args[4]
refs = args[5]
scale_set = args[6]
cache = args[7]
#print 'simulated_local_k_01'
simulator = pysim.Simulation(net_file)
sims_outcomes = []
for sim in xrange(sims):
points = simulator.getRandomPoints(n, projected=True)
sim_events = []
for edge in points:
for point in points[edge]:
sim_events.append(point)
res, dists = local_k(network, sim_events, refs, scale_set, cache=cache)
sims_outcomes.append(res)
return sims_outcomes
def simulate_local_k_02(args):
sims = args[0]
n = args[1]
refs = args[2]
scale_set = args[3]
cache = args[4]
#print 'simulated_local_k_02'
sims_outcomes = []
sampler = RandomSampleGenerator(refs, n).next
for sim in xrange(sims):
sim_events = sampler()
sim_localk = {}
for node in refs:
all_distances = cache[node[1][0]]
distances = []
for event in sim_events:
event = event[1][0]
if event in all_distances:
distances.append(all_distances[event])
sim_localk[node[1][0]] = pykfuncs.kt_values(scale_set, distances, 1)
sims_outcomes.append(sim_localk)
return sims_outcomes
def k_cluster(network, events, refs, scale_set, sims, sig=0.1, sim_network=None, cpus=1):
"""
Parameters:
network: a network to which reference points are injected
events: a set of event points projected into the network
refs: a set of reference points unprojected into the network
scale_set: a tuple of (min, max, resolution)
sims: integer; the number of simulations
sig: float; significance level
sim_network: the source shape file containing the network data;
this is used to simulate point patterns for inference
cpus: integer; the number of CPUs
(multiprocessing can be used for inference)
"""
"""
1. For an observed set of n events on the network, calculate local K function
values for all m reference points
"""
node2localK, net_dists = local_k(network, events, refs, scale_set)
"""
When n < m (sim_network is None):
2. Select n out of the m reference points randomly and
calculate local K function values for these randomly sampled points
When n >= m (sim_network is not None):
2. Randomly simulate n points on network edges and
calculate local K function values for these randomly simulated points
3. Repeat step 2 as many times as the number of simulations
Note: when multiprocessing is available (Darwin) and cpus >= 2, the simulations are parallelized
"""
n = len(events)
sims_outcomes = []
if not multiprocessing or cpus == 1:
if sim_network:
sims_outcomes = simulate_local_k_01((sims, n, sim_network, network, events, refs, scale_set, net_dists))
else:
sims_outcomes = simulate_local_k_02((sims, n, refs, scale_set, net_dists))
elif multiprocessing and cpus >= 2:
pool = multiprocessing.Pool(cpus)
sims_list = range(sims)
sims_list = map(len, [sims_list[i::cpus] for i in xrange(cpus)])
partial_outcomes = None
if sim_network:
partial_outcomes = pool.map(simulate_local_k_01,
[(sim, n, sim_network, network, events, refs, scale_set, net_dists) for sim in sims_list])
else:
partial_outcomes = pool.map(simulate_local_k_02,
[(sim, n, refs, scale_set, net_dists) for sim in sims_list])
sims_outcomes = partial_outcomes[0]
for partial in partial_outcomes[1:]:
sims_outcomes.extend(partial)
"""
4. Determine lower and upper envelopes for the observed K function values
as well as the type of cluster (dispersion or clustering)
"""
# 4. P-value evaluation
lower_envelope = {}
upper_envelope = {}
lower_p = int(sims*sig/2)
upper_p = int(sims*(1-sig/2))
localKs = {}
for node in refs:
node = node[1][0]
lower_envelope[node] = {}
upper_envelope[node] = {}
localKs[node] = {}
for scale in node2localK[node].keys():
local_outcomes = [sim[node][scale] for sim in sims_outcomes]
local_outcomes.sort()
obs = node2localK[node][scale]
lower = local_outcomes[lower_p]
upper = local_outcomes[upper_p]
cluster = cluster_type(obs, lower, upper)
localKs[node][scale] = [obs, lower, upper, cluster]
return localKs
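# Sketch of consuming the k_cluster() result (a hypothetical helper, not part
# of the original module); each entry is
# [observed K, lower envelope, upper envelope, cluster type]:
def _example_report_clusters(localKs):
    for node in localKs:
        for scale, (obs, lower, upper, cluster) in localKs[node].items():
            if cluster == 1:
                print 'clustering at node %s, scale %s' % (node, scale)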
|
|
# coding:utf-8
import argparse
import logging
import os
import re
import time
import urlparse
from config import config
import requests
import torndb
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class DownloadError(Error):
def __init__(self, code, message):
self.code = code
self.message = message
class RangeListAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
res = []
for one in values.split(','):
if re.match(r'^\s*$', one) is not None:
continue
m = re.match(r'^\d+$', one)
if m is not None:
res.append(int(m.group(0)))
continue
m = re.match(r'^(\d+)-(\d+)$', one)
if m is not None:
res.append((int(m.group(1)), int(m.group(2))))
continue
raise argparse.ArgumentError(self, 'invalid range: {}'.format(one))
setattr(namespace, self.dest, res)
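# Sketch of what RangeListAction produces for a --range argument
# (single ids become ints, spans become (start, stop) tuples):
#   '7'          -> [7]
#   '3,10-12'    -> [3, (10, 12)]
#   '1-5,8,9-9'  -> [(1, 5), 8, (9, 9)]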
def gen_session():
global session
session = requests.Session()
session.cookies.update({
'hXas_2132_saltkey': 'KjMz7F1A',
'hXas_2132_lastvisit': '1479896516',
'hXas_2132_auth': 'e608BORLeSxH6vZ4gBU%2FoujokP39ngOFXFDg02PelFeJL7KoCuAi2fOaIBYgbXvBFTeo5cso5a640SL3LAAPcuIW%2BQ',
'hXas_2132_nofavfid': '1',
'hXas_2132_home_diymode': '1',
'hXas_2132_st_t': '37636%7C1479910002%7C3c8388b3b5e70d1c822f3ab4452040aa',
'hXas_2132_forum_lastvisit': 'D_39_1479906049D_41_1479906696D_47_1479907564D_63_1479909524D_2_1479910002',
'hXas_2132_ulastactivity': 'a753C3VJSrbSxAweSdYZwD7ASLHrk5WDMJXjoN58nEia4xB0rhch',
'hXas_2132_visitedfid': '42D43D47D40D2D63D41D39',
'hXas_2132_st_p': '37636%7C1479955254%7C7f9231ffde8186a42e367363bcd5ab37',
'hXas_2132_viewid': 'tid_35438',
'hXas_2132_smile': '1D1',
'hXas_2132_seccode': '16.8a26d01f8d0c516f63',
'hXas_2132_lip': '61.129.42.67%2C1479966530',
'hXas_2132_creditnotice': '0D1D0D0D0D0D0D0D0D37636',
'hXas_2132_creditbase': '0D1010282D0D0D0D0D0D0D0',
'hXas_2132_creditrule': '%E4%B8%8B%E8%BD%BD%E9%99%84%E4%BB%B6',
'hXas_2132_sid': 'h93eWz',
'hXas_2132_checkpm': '1',
'hXas_2132_sendmail': '1',
'a8672_pages': '1',
'a8672_times': '4',
'hXas_2132_lastact': '1479966534%09forum.php%09attachment',
})
def switch_proxy():
global session
gen_session()
return # return to disable proxy
proxy = requests.get('http://dynamic.goubanjia.com/dynamic/get/cfee1493c4b89a6a3d42e0e11551a922.html?ttl').content
proxy = proxy.strip().split(',')
log.info('switch proxy: %s', proxy)
session.proxies = {
'http': proxy[0],
}
try:
log.info('IP: %s', session.get('http://members.3322.org/dyndns/getip').content.strip())
except Exception as e:
log.debug('proxy exception: %s -> %s', type(e), e.message)
log.info('proxy unusable')
switch_proxy()
def get_response(url, headers=None):
global session
timeout_cnt = 0
failed_cnt = 0
while True:
try:
res = session.get(url, headers=headers, timeout=30, proxies=session.proxies)
except requests.exceptions.ConnectionError as e:
switch_proxy()
except requests.exceptions.ProxyError as e:
switch_proxy()
except requests.exceptions.ReadTimeout as e:
timeout_cnt += 1
if timeout_cnt >= 3:
raise DownloadError(900, 'download timeout')
except Exception as e:
try:
log.error('unknown error: %s -> %s', type(e), e.message)
except Exception as e:
pass
failed_cnt += 1
if failed_cnt >= 2:
raise DownloadError(999, 'unknown error')
else:
if res.status_code == 404:
raise DownloadError(404, '404 not found: {}'.format(res.url))
return res
def single_download(stop_id, last_id=None):
if last_id is None:
last_id = stop_id - 1
sql = 'SELECT * FROM daanjia_document WHERE id > %s AND id <= %s ORDER BY id LIMIT 1'
document = db.get(sql, last_id, stop_id)
if document is None:
return stop_id
try:
log.info('downloading: %d %s', document['id'], document['filename'])
except Exception as e:
log.info('downloading: %d', document['id'])
directory = os.path.join('files', document['directory'])
if not os.path.exists(directory):
os.makedirs(directory)
try:
res = get_response(document['download_url'])
direct_url = re.search(r"window\.location\.href\s*='(.*?)'", res.content).group(1)
direct_url = urlparse.urljoin(document['download_url'], direct_url)
res = get_response(direct_url)
filename = u'{0[id]:d}-{0[filename]}'.format(document)
# replace illegal characters in filename
filename = re.sub(r'[\s\\/:*?"<>|]', '', filename)
with open(os.path.join(directory, filename), 'wb') as fp:
fp.write(res.content)
state = 1
log.info('download succeeded')
except DownloadError as e:
state = e.code
log.warning('%s -> %s', type(e), e.message)
sql = 'UPDATE daanjia_document SET state = %s WHERE id = %s'
db.update(sql, state, document['id'])
return document['id']
def range_download(start_id, stop_id):
last_id = start_id - 1
while last_id < stop_id:
last_id = single_download(stop_id, last_id)
def daemon_download(daemon_id, interval=60):
db.update('UPDATE daanjia_document SET state = 0 WHERE state = %s', -daemon_id)
while True:
affected_rows = db.update(
'UPDATE daanjia_document SET state = %s WHERE state = 0 LIMIT 1',
-daemon_id)
if affected_rows == 0:
log.info('no document to download, sleep %ds...', interval)
time.sleep(interval)
continue
# I know this is inefficient, but it is simple to implement.
stdoc = db.get(
'SELECT id FROM daanjia_document WHERE state = %s LIMIT 1', -daemon_id)
single_download(stdoc['id'])
def init():
global log, db
log = logging.getLogger('crawl')
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
switch_proxy()
db = torndb.Connection(
config.get('database', 'host'),
config.get('database', 'database'),
user=config.get('database', 'user'),
password=config.get('database', 'password'),
max_idle_time=240)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-r', '--range', default=[], action=RangeListAction, help='download id range')
parser.add_argument('-d', '--daemon', type=int, choices=xrange(1, 100), metavar='daemon_id',
help='run as a daemon, using the specified daemon id as an identifier')
args = parser.parse_args()
init()
if args.daemon:
daemon_download(args.daemon)
exit()
for r in args.range:
if type(r) == int:
single_download(r)
else:
range_download(r[0], r[1])
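# Hypothetical invocations of this script (the file name is illustrative):
#   python crawl.py -r 100,200-300   # download id 100, then ids 200 through 300
#   python crawl.py -d 1             # run as daemon #1, claiming pending rows one at a time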
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
To run these tests against a live database:
1. Modify the file ``keystone/tests/unit/config_files/backend_sql.conf`` to use
the connection for your live database.
2. Set up a blank, live database
3. Run the tests using::
tox -e py27 -- keystone.tests.unit.test_sql_upgrade
WARNING::
Your database will be wiped.
Do not do this against a database with valuable data as
all data will be lost.
"""
import json
import uuid
from migrate.versioning import api as versioning_api
from migrate.versioning import repository
import mock
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import migration
from oslo_db.sqlalchemy import session as db_session
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy import schema
from testtools import matchers
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
CONF = cfg.CONF
# NOTE(morganfainberg): This should be updated when each DB migration collapse
# is done to mirror the expected structure of the DB in the format of
# { <DB_TABLE_NAME>: [<COLUMN>, <COLUMN>, ...], ... }
INITIAL_TABLE_STRUCTURE = {
'credential': [
'id', 'user_id', 'project_id', 'blob', 'type', 'extra',
],
'domain': [
'id', 'name', 'enabled', 'extra',
],
'endpoint': [
'id', 'legacy_endpoint_id', 'interface', 'region_id', 'service_id',
'url', 'enabled', 'extra',
],
'group': [
'id', 'domain_id', 'name', 'description', 'extra',
],
'policy': [
'id', 'type', 'blob', 'extra',
],
'project': [
'id', 'name', 'extra', 'description', 'enabled', 'domain_id',
'parent_id',
],
'role': [
'id', 'name', 'extra',
],
'service': [
'id', 'type', 'extra', 'enabled',
],
'token': [
'id', 'expires', 'extra', 'valid', 'trust_id', 'user_id',
],
'trust': [
'id', 'trustor_user_id', 'trustee_user_id', 'project_id',
'impersonation', 'deleted_at', 'expires_at', 'remaining_uses', 'extra',
],
'trust_role': [
'trust_id', 'role_id',
],
'user': [
'id', 'name', 'extra', 'password', 'enabled', 'domain_id',
'default_project_id',
],
'user_group_membership': [
'user_id', 'group_id',
],
'region': [
'id', 'description', 'parent_region_id', 'extra',
],
'assignment': [
'type', 'actor_id', 'target_id', 'role_id', 'inherited',
],
'id_mapping': [
'public_id', 'domain_id', 'local_id', 'entity_type',
],
'whitelisted_config': [
'domain_id', 'group', 'option', 'value',
],
'sensitive_config': [
'domain_id', 'group', 'option', 'value',
],
}
# Test migration_helpers.get_init_version separately to ensure it works before
# using it in the SqlUpgrade tests.
class MigrationHelpersGetInitVersionTests(unit.TestCase):
@mock.patch.object(repository, 'Repository')
def test_get_init_version_no_path(self, repo):
migrate_versions = mock.MagicMock()
# make a version list starting with zero. `get_init_version` will
# return None for this value.
migrate_versions.versions.versions = list(range(0, 5))
repo.return_value = migrate_versions
# os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid
# an exception.
with mock.patch('os.path.isdir', return_value=True):
# since 0 is the smallest version expect None
version = migration_helpers.get_init_version()
self.assertIsNone(version)
# check that the default path was used as the first argument to the
# first invocation of repo. Cannot match the full path because it is
# based on where the test is run.
param = repo.call_args_list[0][0][0]
self.assertTrue(param.endswith('/sql/migrate_repo'))
@mock.patch.object(repository, 'Repository')
def test_get_init_version_with_path_initial_version_0(self, repo):
migrate_versions = mock.MagicMock()
# make a version list starting with zero. `get_init_version` will
# return None for this value.
migrate_versions.versions.versions = list(range(0, 5))
repo.return_value = migrate_versions
# os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid
# an exception.
with mock.patch('os.path.isdir', return_value=True):
path = '/keystone/migrate_repo/'
# since 0 is the smallest version expect None
version = migration_helpers.get_init_version(abs_path=path)
self.assertIsNone(version)
@mock.patch.object(repository, 'Repository')
def test_get_init_version_with_path(self, repo):
initial_version = 10
migrate_versions = mock.MagicMock()
migrate_versions.versions.versions = list(range(initial_version + 1,
initial_version + 5))
repo.return_value = migrate_versions
# os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid
# an exception.
with mock.patch('os.path.isdir', return_value=True):
path = '/keystone/migrate_repo/'
version = migration_helpers.get_init_version(abs_path=path)
self.assertEqual(initial_version, version)
class SqlMigrateBase(unit.BaseTestCase):
# override this in subclasses. The default of zero covers tests such
# as extension upgrades.
_initial_db_version = 0
def initialize_sql(self):
self.metadata = sqlalchemy.MetaData()
self.metadata.bind = self.engine
def repo_package(self):
return sql
def setUp(self):
super(SqlMigrateBase, self).setUp()
database.initialize_sql_session()
conn_str = CONF.database.connection
if (conn_str != unit.IN_MEM_DB_CONN_STRING and
conn_str.startswith('sqlite') and
conn_str[10:] == unit.DEFAULT_TEST_DB_FILE):
# Override the default with a DB that is specific to the migration
# tests only if the DB Connection string is the same as the global
# default. This is required so that no conflicts occur due to the
# global default DB already being under migrate control. This is
# only needed if the DB is not-in-memory
db_file = unit.dirs.tmp('keystone_migrate_test.db')
self.config_fixture.config(
group='database',
connection='sqlite:///%s' % db_file)
# create and share a single sqlalchemy engine for testing
with sql.session_for_write() as session:
self.engine = session.get_bind()
self.addCleanup(self.cleanup_instance('engine'))
self.Session = db_session.get_maker(self.engine, autocommit=False)
self.addCleanup(sqlalchemy.orm.session.Session.close_all)
self.initialize_sql()
self.repo_path = migration_helpers.find_migrate_repo(
self.repo_package())
self.schema = versioning_api.ControlledSchema.create(
self.engine,
self.repo_path,
self._initial_db_version)
# auto-detect the highest available schema version in the migrate_repo
self.max_version = self.schema.repository.version().version
self.addCleanup(sql.cleanup)
# drop tables and FKs.
self.addCleanup(self._cleanupDB)
def _cleanupDB(self):
meta = sqlalchemy.MetaData()
meta.bind = self.engine
meta.reflect(self.engine)
with self.engine.begin() as conn:
inspector = reflection.Inspector.from_engine(self.engine)
metadata = schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(
schema.ForeignKeyConstraint((), (), name=fk['name']))
table = schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
for fkc in all_fks:
if self.engine.name != 'sqlite':
conn.execute(schema.DropConstraint(fkc))
for table in tbs:
conn.execute(schema.DropTable(table))
def select_table(self, name):
table = sqlalchemy.Table(name,
self.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertTableExists(self, table_name):
try:
self.select_table(table_name)
except sqlalchemy.exc.NoSuchTableError:
raise AssertionError('Table "%s" does not exist' % table_name)
def assertTableDoesNotExist(self, table_name):
"""Asserts that a given table exists cannot be selected by name."""
# Switch to a different metadata otherwise you might still
# detect renamed or dropped tables
try:
temp_metadata = sqlalchemy.MetaData()
temp_metadata.bind = self.engine
sqlalchemy.Table(table_name, temp_metadata, autoload=True)
except sqlalchemy.exc.NoSuchTableError:
pass
else:
raise AssertionError('Table "%s" already exists' % table_name)
def assertTableCountsMatch(self, table1_name, table2_name):
try:
table1 = self.select_table(table1_name)
except sqlalchemy.exc.NoSuchTableError:
raise AssertionError('Table "%s" does not exist' % table1_name)
try:
table2 = self.select_table(table2_name)
except sqlalchemy.exc.NoSuchTableError:
raise AssertionError('Table "%s" does not exist' % table2_name)
session = self.Session()
table1_count = session.execute(table1.count()).scalar()
table2_count = session.execute(table2.count()).scalar()
if table1_count != table2_count:
raise AssertionError('Table counts do not match: {0} ({1}), {2} '
'({3})'.format(table1_name, table1_count,
table2_name, table2_count))
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
def _migrate(self, version, repository=None, downgrade=False,
current_schema=None):
repository = repository or self.repo_path
err = ''
version = versioning_api._migrate_version(self.schema,
version,
not downgrade,
err)
if not current_schema:
current_schema = self.schema
changeset = current_schema.changeset(version)
for ver, change in changeset:
self.schema.runchange(ver, change, changeset.step)
self.assertEqual(self.schema.version, version)
def assertTableColumns(self, table_name, expected_cols):
"""Asserts that the table contains the expected set of columns."""
self.initialize_sql()
table = self.select_table(table_name)
actual_cols = [col.name for col in table.columns]
# Check if the columns are equal, but allow for a different order,
# which might occur after an upgrade followed by a downgrade
self.assertItemsEqual(expected_cols, actual_cols,
'%s table' % table_name)
class SqlUpgradeTests(SqlMigrateBase):
_initial_db_version = migration_helpers.get_init_version()
def test_blank_db_to_start(self):
self.assertTableDoesNotExist('user')
def test_start_version_db_init_version(self):
with sql.session_for_write() as session:
version = migration.db_version(session.get_bind(), self.repo_path,
self._initial_db_version)
self.assertEqual(
self._initial_db_version,
version,
'DB is not at version %s' % self._initial_db_version)
def test_upgrade_add_initial_tables(self):
self.upgrade(self._initial_db_version + 1)
self.check_initial_table_structure()
def check_initial_table_structure(self):
for table in INITIAL_TABLE_STRUCTURE:
self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
def insert_dict(self, session, table_name, d, table=None):
"""Naively inserts key-value pairs into a table, given a dictionary."""
if table is None:
this_table = sqlalchemy.Table(table_name, self.metadata,
autoload=True)
else:
this_table = table
insert = this_table.insert().values(**d)
session.execute(insert)
session.commit()
def test_kilo_squash(self):
self.upgrade(67)
# In 053 the size of ID and parent region ID columns were changed
table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(255, table.c.id.type.length)
self.assertEqual(255, table.c.parent_region_id.type.length)
table = sqlalchemy.Table('endpoint', self.metadata, autoload=True)
self.assertEqual(255, table.c.region_id.type.length)
# In 054 an index was created for the actor_id of the assignment table
table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_actor_id', ['actor_id']), index_data)
# In 055 indexes were created for user and trust IDs in the token table
table = sqlalchemy.Table('token', self.metadata, autoload=True)
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_token_user_id', ['user_id']), index_data)
self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
# In 062 the role ID foreign key was removed from the assignment table
if self.engine.name == "mysql":
self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
# In 064 the domain ID FK was removed from the group and user tables
if self.engine.name != 'sqlite':
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('group', 'domain_id'))
self.assertFalse(self.does_fk_exist('user', 'domain_id'))
# In 067 the role ID index was removed from the assignment table
if self.engine.name == "mysql":
self.assertFalse(self.does_index_exist('assignment',
'assignment_role_id_fkey'))
def test_insert_assignment_inherited_pk(self):
ASSIGNMENT_TABLE_NAME = 'assignment'
INHERITED_COLUMN_NAME = 'inherited'
ROLE_TABLE_NAME = 'role'
self.upgrade(72)
# Check that the 'inherited' column is not part of the PK
self.assertFalse(self.does_pk_exist(ASSIGNMENT_TABLE_NAME,
INHERITED_COLUMN_NAME))
session = self.Session()
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.insert_dict(session, ROLE_TABLE_NAME, role)
# Create both inherited and noninherited role assignments
inherited = {'type': 'UserProject',
'actor_id': uuid.uuid4().hex,
'target_id': uuid.uuid4().hex,
'role_id': role['id'],
'inherited': True}
noninherited = inherited.copy()
noninherited['inherited'] = False
# Create another inherited role assignment as a spoiler
spoiler = inherited.copy()
spoiler['actor_id'] = uuid.uuid4().hex
self.insert_dict(session, ASSIGNMENT_TABLE_NAME, inherited)
self.insert_dict(session, ASSIGNMENT_TABLE_NAME, spoiler)
# Since 'inherited' is not part of the PK, we can't insert noninherited
self.assertRaises(db_exception.DBDuplicateEntry,
self.insert_dict,
session,
ASSIGNMENT_TABLE_NAME,
noninherited)
session.close()
self.upgrade(73)
session = self.Session()
self.metadata.clear()
# Check that the 'inherited' column is now part of the PK
self.assertTrue(self.does_pk_exist(ASSIGNMENT_TABLE_NAME,
INHERITED_COLUMN_NAME))
# The noninherited role assignment can now be inserted
self.insert_dict(session, ASSIGNMENT_TABLE_NAME, noninherited)
assignment_table = sqlalchemy.Table(ASSIGNMENT_TABLE_NAME,
self.metadata,
autoload=True)
assignments = session.query(assignment_table).all()
for assignment in (inherited, spoiler, noninherited):
self.assertIn((assignment['type'], assignment['actor_id'],
assignment['target_id'], assignment['role_id'],
assignment['inherited']),
assignments)
def does_pk_exist(self, table, pk_column):
"""Checks whether a column is primary key on a table."""
inspector = reflection.Inspector.from_engine(self.engine)
pk_columns = inspector.get_pk_constraint(table)['constrained_columns']
return pk_column in pk_columns
def does_fk_exist(self, table, fk_column):
inspector = reflection.Inspector.from_engine(self.engine)
for fk in inspector.get_foreign_keys(table):
if fk_column in fk['constrained_columns']:
return True
return False
def does_index_exist(self, table_name, index_name):
meta = sqlalchemy.MetaData(bind=self.engine)
table = sqlalchemy.Table(table_name, meta, autoload=True)
return index_name in [idx.name for idx in table.indexes]
def test_endpoint_policy_upgrade(self):
self.assertTableDoesNotExist('policy_association')
self.upgrade(81)
self.assertTableColumns('policy_association',
['id', 'policy_id', 'endpoint_id',
'service_id', 'region_id'])
@mock.patch.object(migration_helpers, 'get_db_version', return_value=1)
def test_endpoint_policy_already_migrated(self, mock_ep):
# By setting the return value to 1, the migration has already been
# run, and there's no need to create the table again
self.upgrade(81)
mock_ep.assert_called_once_with(extension='endpoint_policy',
engine=mock.ANY)
# It won't exist because we are mocking it, but we can verify
# that 081 did not create the table
self.assertTableDoesNotExist('policy_association')
def test_create_federation_tables(self):
self.identity_provider = 'identity_provider'
self.federation_protocol = 'federation_protocol'
self.service_provider = 'service_provider'
self.mapping = 'mapping'
self.remote_ids = 'idp_remote_ids'
self.assertTableDoesNotExist(self.identity_provider)
self.assertTableDoesNotExist(self.federation_protocol)
self.assertTableDoesNotExist(self.service_provider)
self.assertTableDoesNotExist(self.mapping)
self.assertTableDoesNotExist(self.remote_ids)
self.upgrade(82)
self.assertTableColumns(self.identity_provider,
['id', 'description', 'enabled'])
self.assertTableColumns(self.federation_protocol,
['id', 'idp_id', 'mapping_id'])
self.assertTableColumns(self.mapping,
['id', 'rules'])
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
'relay_state_prefix', 'sp_url'])
self.assertTableColumns(self.remote_ids, ['idp_id', 'remote_id'])
federation_protocol = sqlalchemy.Table(self.federation_protocol,
self.metadata,
autoload=True)
self.assertFalse(federation_protocol.c.mapping_id.nullable)
sp_table = sqlalchemy.Table(self.service_provider,
self.metadata,
autoload=True)
self.assertFalse(sp_table.c.auth_url.nullable)
self.assertFalse(sp_table.c.sp_url.nullable)
@mock.patch.object(migration_helpers, 'get_db_version', return_value=8)
def test_federation_already_migrated(self, mock_federation):
# By setting the return value to 8, the migration has already been
# run, and there's no need to create the table again.
self.upgrade(82)
mock_federation.assert_any_call(extension='federation',
engine=mock.ANY)
# It won't exist because we are mocking it, but we can verify
# that 082 did not create the table.
self.assertTableDoesNotExist('identity_provider')
self.assertTableDoesNotExist('federation_protocol')
self.assertTableDoesNotExist('mapping')
self.assertTableDoesNotExist('service_provider')
self.assertTableDoesNotExist('idp_remote_ids')
def test_create_oauth_tables(self):
consumer = 'consumer'
request_token = 'request_token'
access_token = 'access_token'
self.assertTableDoesNotExist(consumer)
self.assertTableDoesNotExist(request_token)
self.assertTableDoesNotExist(access_token)
self.upgrade(83)
self.assertTableColumns(consumer,
['id',
'description',
'secret',
'extra'])
self.assertTableColumns(request_token,
['id',
'request_secret',
'verifier',
'authorizing_user_id',
'requested_project_id',
'role_ids',
'consumer_id',
'expires_at'])
self.assertTableColumns(access_token,
['id',
'access_secret',
'authorizing_user_id',
'project_id',
'role_ids',
'consumer_id',
'expires_at'])
@mock.patch.object(migration_helpers, 'get_db_version', return_value=5)
def test_oauth1_already_migrated(self, mock_oauth1):
# By setting the return value to 5, the migration has already been
# run, and there's no need to create the table again.
self.upgrade(83)
mock_oauth1.assert_any_call(extension='oauth1', engine=mock.ANY)
# It won't exist because we are mocking it, but we can verify
# that 083 did not create the table.
self.assertTableDoesNotExist('consumer')
self.assertTableDoesNotExist('request_token')
self.assertTableDoesNotExist('access_token')
def test_create_revoke_table(self):
self.assertTableDoesNotExist('revocation_event')
self.upgrade(84)
self.assertTableColumns('revocation_event',
['id', 'domain_id', 'project_id', 'user_id',
'role_id', 'trust_id', 'consumer_id',
'access_token_id', 'issued_before',
'expires_at', 'revoked_at',
'audit_chain_id', 'audit_id'])
@mock.patch.object(migration_helpers, 'get_db_version', return_value=2)
def test_revoke_already_migrated(self, mock_revoke):
# By setting the return value to 2, the migration has already been
# run, and there's no need to create the table again.
self.upgrade(84)
mock_revoke.assert_any_call(extension='revoke', engine=mock.ANY)
# It won't exist because we are mocking it, but we can verify
# that 084 did not create the table.
self.assertTableDoesNotExist('revocation_event')
def test_project_is_domain_upgrade(self):
self.upgrade(74)
self.assertTableColumns('project',
['id', 'name', 'extra', 'description',
'enabled', 'domain_id', 'parent_id',
'is_domain'])
def test_implied_roles_upgrade(self):
self.upgrade(87)
self.assertTableColumns('implied_role',
['prior_role_id', 'implied_role_id'])
self.assertTrue(self.does_fk_exist('implied_role', 'prior_role_id'))
self.assertTrue(self.does_fk_exist('implied_role', 'implied_role_id'))
def test_add_config_registration(self):
config_registration = 'config_register'
self.upgrade(74)
self.assertTableDoesNotExist(config_registration)
self.upgrade(75)
self.assertTableColumns(config_registration, ['type', 'domain_id'])
def test_endpoint_filter_upgrade(self):
def assert_tables_columns_exist():
self.assertTableColumns('project_endpoint',
['endpoint_id', 'project_id'])
self.assertTableColumns('endpoint_group',
['id', 'name', 'description', 'filters'])
self.assertTableColumns('project_endpoint_group',
['endpoint_group_id', 'project_id'])
self.assertTableDoesNotExist('project_endpoint')
self.upgrade(85)
assert_tables_columns_exist()
@mock.patch.object(migration_helpers, 'get_db_version', return_value=2)
def test_endpoint_filter_already_migrated(self, mock_endpoint_filter):
# By setting the return value to 2, the migration has already been
# run, and there's no need to create the table again.
self.upgrade(85)
mock_endpoint_filter.assert_any_call(extension='endpoint_filter',
engine=mock.ANY)
# It won't exist because we are mocking it, but we can verify
# that 085 did not create the table.
self.assertTableDoesNotExist('project_endpoint')
self.assertTableDoesNotExist('endpoint_group')
self.assertTableDoesNotExist('project_endpoint_group')
def test_add_trust_unique_constraint_upgrade(self):
self.upgrade(86)
inspector = reflection.Inspector.from_engine(self.engine)
constraints = inspector.get_unique_constraints('trust')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('duplicate_trust_constraint', constraint_names)
def test_add_domain_specific_roles(self):
"""Check database upgraded successfully for domain specific roles.
The following items need to be checked:
- The domain_id column has been added
- That it has been added to the uniqueness constraints
- Existing roles have their domain_id columns set to the specific
string of '<<null>>'
"""
NULL_DOMAIN_ID = '<<null>>'
self.upgrade(87)
session = self.Session()
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
# Add a role before we upgrade, so we can check that its new domain_id
# attribute is handled correctly
role_id = uuid.uuid4().hex
self.insert_dict(session, 'role',
{'id': role_id, 'name': uuid.uuid4().hex})
session.close()
self.upgrade(88)
session = self.Session()
self.metadata.clear()
self.assertTableColumns('role', ['id', 'name', 'domain_id', 'extra'])
# Check the domain_id has been added to the uniqueness constraint
inspector = reflection.Inspector.from_engine(self.engine)
constraints = inspector.get_unique_constraints('role')
constraint_columns = [
constraint['column_names'] for constraint in constraints
if constraint['name'] == 'ixu_role_name_domain_id']
self.assertIn('domain_id', constraint_columns[0])
# Now check our role has its domain_id attribute set correctly
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
cols = [role_table.c.domain_id]
filter = role_table.c.id == role_id
statement = sqlalchemy.select(cols).where(filter)
role_entry = session.execute(statement).fetchone()
self.assertEqual(NULL_DOMAIN_ID, role_entry[0])
def test_add_root_of_all_domains(self):
NULL_DOMAIN_ID = '<<keystone.domain.root>>'
self.upgrade(89)
session = self.Session()
domain_table = sqlalchemy.Table(
'domain', self.metadata, autoload=True)
query = session.query(domain_table).filter_by(id=NULL_DOMAIN_ID)
domain_from_db = query.one()
self.assertIn(NULL_DOMAIN_ID, domain_from_db)
project_table = sqlalchemy.Table(
'project', self.metadata, autoload=True)
query = session.query(project_table).filter_by(id=NULL_DOMAIN_ID)
project_from_db = query.one()
self.assertIn(NULL_DOMAIN_ID, project_from_db)
session.close()
def test_add_local_user_and_password_tables(self):
local_user_table = 'local_user'
password_table = 'password'
self.upgrade(89)
self.assertTableDoesNotExist(local_user_table)
self.assertTableDoesNotExist(password_table)
self.upgrade(90)
self.assertTableColumns(local_user_table,
['id',
'user_id',
'domain_id',
'name'])
self.assertTableColumns(password_table,
['id',
'local_user_id',
'password'])
def test_migrate_data_to_local_user_and_password_tables(self):
def get_expected_users():
expected_users = []
for test_user in default_fixtures.USERS:
user = {}
user['id'] = uuid.uuid4().hex
user['name'] = test_user['name']
user['domain_id'] = test_user['domain_id']
user['password'] = test_user['password']
user['enabled'] = True
user['extra'] = json.dumps(uuid.uuid4().hex)
user['default_project_id'] = uuid.uuid4().hex
expected_users.append(user)
return expected_users
def add_users_to_db(expected_users, user_table):
for user in expected_users:
ins = user_table.insert().values(
{'id': user['id'],
'name': user['name'],
'domain_id': user['domain_id'],
'password': user['password'],
'enabled': user['enabled'],
'extra': user['extra'],
'default_project_id': user['default_project_id']})
ins.execute()
def get_users_from_db(user_table, local_user_table, password_table):
sel = (
sqlalchemy.select([user_table.c.id,
user_table.c.enabled,
user_table.c.extra,
user_table.c.default_project_id,
local_user_table.c.name,
local_user_table.c.domain_id,
password_table.c.password])
.select_from(user_table.join(local_user_table,
user_table.c.id ==
local_user_table.c.user_id)
.join(password_table,
local_user_table.c.id ==
password_table.c.local_user_id))
)
user_rows = sel.execute()
users = []
for row in user_rows:
users.append(
{'id': row['id'],
'name': row['name'],
'domain_id': row['domain_id'],
'password': row['password'],
'enabled': row['enabled'],
'extra': row['extra'],
'default_project_id': row['default_project_id']})
return users
meta = sqlalchemy.MetaData()
meta.bind = self.engine
user_table_name = 'user'
local_user_table_name = 'local_user'
password_table_name = 'password'
# populate current user table
self.upgrade(90)
user_table = sqlalchemy.Table(user_table_name, meta, autoload=True)
expected_users = get_expected_users()
add_users_to_db(expected_users, user_table)
# upgrade to migration and test
self.upgrade(91)
self.assertTableCountsMatch(user_table_name, local_user_table_name)
self.assertTableCountsMatch(local_user_table_name, password_table_name)
meta.clear()
user_table = sqlalchemy.Table(user_table_name, meta, autoload=True)
local_user_table = sqlalchemy.Table(local_user_table_name, meta,
autoload=True)
password_table = sqlalchemy.Table(password_table_name, meta,
autoload=True)
actual_users = get_users_from_db(user_table, local_user_table,
password_table)
self.assertListEqual(expected_users, actual_users)
def test_migrate_user_with_null_password_to_password_tables(self):
USER_TABLE_NAME = 'user'
LOCAL_USER_TABLE_NAME = 'local_user'
PASSWORD_TABLE_NAME = 'password'
self.upgrade(90)
user_ref = unit.new_user_ref(uuid.uuid4().hex)
user_ref.pop('password')
# pop the extra attribute, which isn't recognized by the SQL expression
# layer.
user_ref.pop('email')
session = self.Session()
self.insert_dict(session, USER_TABLE_NAME, user_ref)
self.metadata.clear()
self.upgrade(91)
# migration should be successful.
self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME)
# no new entry was added to the password table because the
# user doesn't have a password.
password_table = self.select_table(PASSWORD_TABLE_NAME)
rows = session.execute(password_table.count()).scalar()
self.assertEqual(0, rows)
def test_migrate_user_skip_user_already_exist_in_local_user(self):
USER_TABLE_NAME = 'user'
LOCAL_USER_TABLE_NAME = 'local_user'
self.upgrade(90)
user1_ref = unit.new_user_ref(uuid.uuid4().hex)
# pop the extra attribute, which isn't recognized by the SQL expression
# layer.
user1_ref.pop('email')
user2_ref = unit.new_user_ref(uuid.uuid4().hex)
user2_ref.pop('email')
session = self.Session()
self.insert_dict(session, USER_TABLE_NAME, user1_ref)
self.insert_dict(session, USER_TABLE_NAME, user2_ref)
user_id = user1_ref.pop('id')
user_name = user1_ref.pop('name')
domain_id = user1_ref.pop('domain_id')
local_user_ref = {'user_id': user_id, 'name': user_name,
'domain_id': domain_id}
self.insert_dict(session, LOCAL_USER_TABLE_NAME, local_user_ref)
self.metadata.clear()
self.upgrade(91)
# migration should be successful and user2_ref has been migrated to
# `local_user` table.
self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME)
def test_implied_roles_fk_on_delete_cascade(self):
if self.engine.name == 'sqlite':
self.skipTest('sqlite backend does not support foreign keys')
self.upgrade(92)
def _create_three_roles():
id_list = []
for _ in range(3):
role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
id_list.append(role['id'])
return id_list
role_id_list = _create_three_roles()
self.role_api.create_implied_role(role_id_list[0], role_id_list[1])
self.role_api.create_implied_role(role_id_list[0], role_id_list[2])
# assert that there are two roles implied by role 0.
implied_roles = self.role_api.list_implied_roles(role_id_list[0])
self.assertThat(implied_roles, matchers.HasLength(2))
self.role_api.delete_role(role_id_list[0])
# assert the cascade deletion is effective.
implied_roles = self.role_api.list_implied_roles(role_id_list[0])
self.assertThat(implied_roles, matchers.HasLength(0))
def test_domain_as_project_upgrade(self):
def _populate_domain_and_project_tables(session):
# Three domains, with various different attributes
self.domains = [{'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True,
'extra': {'description': uuid.uuid4().hex,
'another_attribute': True}},
{'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True,
'extra': {'description': uuid.uuid4().hex}},
{'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': False}]
# Four projects, two top level, two children
self.projects = []
self.projects.append(unit.new_project_ref(
domain_id=self.domains[0]['id'],
parent_id=None))
self.projects.append(unit.new_project_ref(
domain_id=self.domains[0]['id'],
parent_id=self.projects[0]['id']))
self.projects.append(unit.new_project_ref(
domain_id=self.domains[1]['id'],
parent_id=None))
self.projects.append(unit.new_project_ref(
domain_id=self.domains[1]['id'],
parent_id=self.projects[2]['id']))
for domain in self.domains:
this_domain = domain.copy()
if 'extra' in this_domain:
this_domain['extra'] = json.dumps(this_domain['extra'])
self.insert_dict(session, 'domain', this_domain)
for project in self.projects:
self.insert_dict(session, 'project', project)
def _check_projects(projects):
def _assert_domain_matches_project(project):
for domain in self.domains:
if project.id == domain['id']:
self.assertEqual(domain['name'], project.name)
self.assertEqual(domain['enabled'], project.enabled)
if domain['id'] == self.domains[0]['id']:
self.assertEqual(domain['extra']['description'],
project.description)
self.assertEqual({'another_attribute': True},
json.loads(project.extra))
elif domain['id'] == self.domains[1]['id']:
self.assertEqual(domain['extra']['description'],
project.description)
self.assertEqual({}, json.loads(project.extra))
# We had the 3 domains we created, which should now be projects acting
# as domains. To these we add the 4 original projects, plus the root
# of all domains row.
self.assertEqual(8, projects.count())
project_ids = []
for project in projects:
if project.is_domain:
self.assertEqual(NULL_DOMAIN_ID, project.domain_id)
self.assertIsNone(project.parent_id)
else:
self.assertIsNotNone(project.domain_id)
self.assertIsNotNone(project.parent_id)
project_ids.append(project.id)
for domain in self.domains:
self.assertIn(domain['id'], project_ids)
for project in self.projects:
self.assertIn(project['id'], project_ids)
# Now check the attributes of the domains came across OK
for project in projects:
_assert_domain_matches_project(project)
NULL_DOMAIN_ID = '<<keystone.domain.root>>'
self.upgrade(92)
session = self.Session()
_populate_domain_and_project_tables(session)
self.upgrade(93)
proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
projects = session.query(proj_table)
_check_projects(projects)
def test_add_federated_user_table(self):
federated_user_table = 'federated_user'
self.upgrade(93)
self.assertTableDoesNotExist(federated_user_table)
self.upgrade(94)
self.assertTableColumns(federated_user_table,
['id',
'user_id',
'idp_id',
'protocol_id',
'unique_id',
'display_name'])
def test_add_int_pkey_to_revocation_event_table(self):
meta = sqlalchemy.MetaData()
meta.bind = self.engine
REVOCATION_EVENT_TABLE_NAME = 'revocation_event'
self.upgrade(94)
revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME,
meta, autoload=True)
# assert id column is a string (before)
self.assertEqual('VARCHAR(64)', str(revocation_event_table.c.id.type))
self.upgrade(95)
meta.clear()
revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME,
meta, autoload=True)
# assert id column is an integer (after)
self.assertEqual('INTEGER', str(revocation_event_table.c.id.type))
class VersionTests(SqlMigrateBase):
_initial_db_version = migration_helpers.get_init_version()
def test_core_initial(self):
"""Get the version before migrated, it's the initial DB version."""
version = migration_helpers.get_db_version()
self.assertEqual(self._initial_db_version, version)
def test_core_max(self):
"""When get the version after upgrading, it's the new version."""
self.upgrade(self.max_version)
version = migration_helpers.get_db_version()
self.assertEqual(self.max_version, version)
def test_assert_not_schema_downgrade(self):
self.upgrade(self.max_version)
self.assertRaises(
db_exception.DbMigrationError,
migration_helpers._sync_common_repo,
self.max_version - 1)
def test_extension_not_controlled(self):
"""When get the version before controlling, raises DbMigrationError."""
self.assertRaises(db_exception.DbMigrationError,
migration_helpers.get_db_version,
extension='federation')
def test_unexpected_extension(self):
"""The version for a non-existent extension raises ImportError."""
extension_name = uuid.uuid4().hex
self.assertRaises(ImportError,
migration_helpers.get_db_version,
extension=extension_name)
def test_unversioned_extension(self):
"""The version for extensions without migrations raise an exception."""
self.assertRaises(exception.MigrationNotProvided,
migration_helpers.get_db_version,
extension='admin_crud')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
pass
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or array_ops.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = ("memory_sequence_length and memory tensor batch sizes do not "
"match.")
with ops.control_dependencies([
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message)]):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
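# Illustrative sketch of the masking performed above (values made up): with
# max_time=4 and memory_sequence_length=[2, 4], sequence_mask produces
#   [[1., 1., 0., 0.],
#    [1., 1., 1., 1.]]
# which is reshaped/broadcast over the trailing depth dimensions, so memory
# entries past each sequence length contribute zeros to the attention keys.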
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
message = ("All values in memory_sequence_length must greater than zero.")
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
score_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(score_mask, score, score_mask_values)
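# Sketch of the score masking above (hypothetical numbers): with
# memory_sequence_length=[2] and the default score_mask_value of -inf,
#   score = [[0.3, 1.2, 0.7, 0.1]]
# becomes
#   [[0.3, 1.2, -inf, -inf]]
# so that a softmax probability_fn assigns ~0 attention weight to the
# positions past the true sequence length.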
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=float("-inf"),
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, previous_alignments)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, layers_base.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, layers_base.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0])
self._alignments_size = (self._keys.shape[1].value or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=float("-inf"),
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, previous_alignments):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
previous_alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = self.keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the the keys' dimension (%s)?"
% (query, depth, self.keys, key_units, key_units))
dtype = query.dtype
with variable_scope.variable_scope(None, "luong_attention", [query]):
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, self.keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if self._scale:
# Scalar used in weight scaling
g = variable_scope.get_variable(
"attention_g", dtype=dtype, initializer=1.)
score = g * score
alignments = self._probability_fn(score, previous_alignments)
return alignments
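# Usage sketch (illustrative only; `encoder_outputs`, `source_lengths` and
# `decoder_cell` are placeholder tensors/cells, not defined in this module):
#
#   attention_mechanism = LuongAttention(
#       num_units=256,
#       memory=encoder_outputs,                  # [batch_size, max_time, depth]
#       memory_sequence_length=source_lengths,
#       scale=True)                              # use the scaled form
#   attn_cell = AttentionWrapper(decoder_cell, attention_mechanism,
#                                attention_layer_size=256)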
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=float("-inf"),
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, previous_alignments):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
previous_alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
dtype = processed_query.dtype
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
keys = self._keys
v = variable_scope.get_variable(
"attention_v", [self._num_units], dtype=dtype)
if self._normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=math.sqrt((1. / self._num_units)))
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [self._num_units], dtype=dtype,
initializer=init_ops.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(v)))
score = math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2])
else:
score = math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query),
[2])
alignments = self._probability_fn(score, previous_alignments)
return alignments
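# Scoring recap (matches the implementation above):
#   unnormalized: score_j = sum_k v_k * tanh(keys_j + processed_query)_k
#   normalized:   score_j = sum_k (g * v / ||v||)_k * tanh(keys_j + processed_query + b)_k
# where keys_j = memory_layer(memory_j) and processed_query = query_layer(query).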
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: The alignment emitted at the previous time step.
- `alignment_history`: (if enabled) a `TensorArray` containing alignment
matrices from all time steps. Call `stack()` to convert to a `Tensor`.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
return super(AttentionWrapperState, self)._replace(**kwargs)
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
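# Example: hardmax([[0.1, 2.0, 0.3]]) evaluates to [[0., 1., 0.]], i.e. a
# one-hot vector at the argmax position along the last axis.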
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
Args:
cell: An instance of `RNNCell`.
attention_mechanism: An instance of `AttentionMechanism`.
attention_layer_size: Python integer, the depth of the attention (output)
layer. If None (default), use the context as attention at each time
step. Otherwise, feed the context and cell output into the attention
layer to generate attention at each time step.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
"""
super(AttentionWrapper, self).__init__(name=name)
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError(
"cell must be an RNNCell, saw type: %s" % type(cell).__name__)
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be a AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
self._attention_layer = layers_core.Dense(
attention_layer_size, name="attention_layer", use_bias=False)
self._attention_layer_size = attention_layer_size
else:
self._attention_layer = None
self._attention_layer_size = attention_mechanism.values.get_shape()[
-1].value
self._cell = cell
self._attention_mechanism = attention_mechanism
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
[check_ops.assert_equal(state_batch_size,
self._attention_mechanism.batch_size,
message=error_message)]):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._attention_mechanism.alignments_size,
alignment_history=()) # alignment_history is sometimes a TensorArray
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
[check_ops.assert_equal(batch_size,
self._attention_mechanism.batch_size,
message=error_message)]):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
if self._alignment_history:
alignment_history = tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True)
else:
alignment_history = ()
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._attention_mechanism.initial_alignments(
batch_size, dtype),
alignment_history=alignment_history)
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
"""
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
[check_ops.assert_equal(cell_batch_size,
self._attention_mechanism.batch_size,
message=error_message)]):
cell_output = array_ops.identity(
cell_output, name="checked_cell_output")
alignments = self._attention_mechanism(
cell_output, previous_alignments=state.alignments)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, attention_mechanism.num_units]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, attention_mechanism.num_units].
# we then squeeze out the singleton dim.
attention_mechanism_values = self._attention_mechanism.values
context = math_ops.matmul(expanded_alignments, attention_mechanism_values)
context = array_ops.squeeze(context, [1])
if self._attention_layer is not None:
attention = self._attention_layer(
array_ops.concat([cell_output, context], 1))
else:
attention = context
if self._alignment_history:
alignment_history = state.alignment_history.write(
state.time, alignments)
else:
alignment_history = ()
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
alignments=alignments,
alignment_history=alignment_history)
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
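# End-to-end sketch (illustrative; `encoder_state`, `helper`, `output_layer` and
# `batch_size` are placeholders, and BasicDecoder/dynamic_decode come from
# tf.contrib.seq2seq rather than this module):
#
#   attn_cell = AttentionWrapper(decoder_cell, attention_mechanism,
#                                attention_layer_size=256,
#                                alignment_history=True)
#   initial_state = attn_cell.zero_state(batch_size, dtypes.float32).clone(
#       cell_state=encoder_state)
#   decoder = BasicDecoder(attn_cell, helper, initial_state,
#                          output_layer=output_layer)
#   outputs, final_state, _ = dynamic_decode(decoder)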
|
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import threading
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
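# The 227 reply encodes the port as two bytes: e.g. port 50000 is sent as
# (195, 80) because 195 * 256 + 80 == 50000.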
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
# consume from SSL socket to finalize handshake and avoid
# "SSLError [SSL] shutdown while in init"
self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
'Error', 'parse150', 'parse227', 'parse229', 'parse257',
'print_line', 'ftpcp', 'test'}
support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass,
MiscTestCase]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# If the fee's positive delta is higher than this value the tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
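# Worked example (illustrative numbers): with a relay fee of 0.00001000 BTC/kvB,
# feeTolerance = 2 * 0.00001000 / 1000 = 0.00000002 BTC (2 satoshis).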
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction which will not get a change output      #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid merelcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len;
# overwrite the empty scriptSig (length byte 00) with "0100", i.e. a 1-byte scriptSig containing 0x00
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
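# The raw hex below decodes (for reference) as: version 01000000, vin count 00,
# vout count 01, value 0000000000000000, script length 06,
# script 6a0474657374 (OP_RETURN PUSH4 "test"), locktime 00000000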
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
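# The fee is expected to scale roughly linearly with the requested feeRate:
# result2 and result3 should pay about 2x and 10x the per-byte rate measured
# from the baseline result above.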
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
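# With subtractFeeFromOutputs the recipient output shrinks by the fee while the
# change output grows by the same amount, so the selected input total is
# unchanged; an empty subtraction list behaves the same as omitting the option.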
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
|
|
#! /usr/bin/env python
import shlex, argparse, cmd
import logging
import random, time, os, sys
import multiprocessing
import signal
################################################################
class Conversions(object):
"""Support conversions of storage units with SI-ish units."""
import re as _re
_spec = _re.compile(r'''^\s*(?P<digits>[0-9]+)(?P<frac>\.[0-9]*)?\s*(?P<spec>.*)\s*$''')
# Not quite SI units... We allow suffix of B for Bytes.
#
# We use lowercase for powers of 10, uppercase for powers of 1024,
# even when there is ambiguity in SI units or no lowercase form.
#
_mult = {
'k': 1000,
'kB': 1000,
'K': 1024,
'KB': 1024,
'KiB': 1024,
'm': 1000 * 1000,
'mB': 1000 * 1000,
'M': 1024 * 1024,
'MB': 1024 * 1024,
'MiB': 1024 * 1024,
'g': 1000 * 1000 * 1000,
'gB': 1000 * 1000 * 1000,
'G': 1024 * 1024 * 1024,
'GB': 1024 * 1024 * 1024,
'GiB': 1024 * 1024 * 1024,
't': 1000 * 1000 * 1000 * 1000,
'tB': 1000 * 1000 * 1000 * 1000,
'T': 1024 * 1024 * 1024 * 1024,
'TB': 1024 * 1024 * 1024 * 1024,
'TiB': 1024 * 1024 * 1024 * 1024,
'p': 1000 * 1000 * 1000 * 1000 * 1000,
'pB': 1000 * 1000 * 1000 * 1000 * 1000,
'P': 1024 * 1024 * 1024 * 1024 * 1024,
'PB': 1024 * 1024 * 1024 * 1024 * 1024,
'PiB': 1024 * 1024 * 1024 * 1024 * 1024,
'e': 1000 * 1000 * 1000 * 1000 * 1000 * 1000,
'eB': 1000 * 1000 * 1000 * 1000 * 1000 * 1000,
'E': 1024 * 1024 * 1024 * 1024 * 1024 * 1024,
'EB': 1024 * 1024 * 1024 * 1024 * 1024 * 1024,
'EiB': 1024 * 1024 * 1024 * 1024 * 1024 * 1024,
}
_div = (
(_mult['KiB'], 1, 'B'),
(_mult['MiB'], _mult['KiB'], 'KiB'),
(_mult['GiB'], _mult['MiB'], 'MiB'),
(_mult['TiB'], _mult['GiB'], 'GiB'),
(_mult['PiB'], _mult['TiB'], 'TiB'),
(_mult['EiB'], _mult['PiB'], 'PiB'),
( None, _mult['EiB'], 'EiB'))
@classmethod
def datasize2int(cls,s):
"""Convert a data size specification into a long integer."""
m = cls._spec.match(s)
if m is not None:
digits = m.group('digits')
frac = m.group('frac')
if frac is not None:
i = float(digits+frac)
else:
i = long(digits)
ds = m.group('spec')
if ds == '':
return i
else:
try:
return long(i * cls._mult[ds])
except:
pass
raise DataSizeError, s
@classmethod
def int2datasize(cls,v):
"""Convert an integer into a data size specification string."""
for b,d,ds in cls._div:
if b is None or v < b:
return str(v/d) + ds
class DataSizeError(Exception):
def __init__(self, ds):
self._ds = ds
def __repr__(self):
return repr(self._ds)
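# A quick sketch of how the conversion helpers above are meant to be used
# (values shown for illustration; lowercase suffixes are powers of 10,
# uppercase suffixes are powers of 1024):
#   Conversions.datasize2int('4MiB')            -> 4 * 1024 * 1024
#   Conversions.datasize2int('4m')              -> 4 * 1000 * 1000
#   Conversions.int2datasize(4 * 1024 * 1024)   -> '4MiB'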
################################################################
class BlockDeviceOps(object):
"""Collect common operations on block devices."""
# FIXME: Derive a class for Linux, have an abstract base class.
@classmethod
def set_io_variable(cls, dev, var, val):
"""Set a I/O elevator variable on the device."""
import os.path, glob
# Find the device under /sys/block. We may need to resolve
# symlinks.
dev = os.path.realpath(dev)
key = os.path.basename(dev)
if not os.path.isdir('/sys/block/' + key):
logging.warn('Unable to manipulate I/O tunable %s for %s', var, dev)
return
# We need to manage slave settings first.
for s in glob.iglob('/sys/block/'+key+'/slaves/*'):
cls.set_io_variable(s, var, val)
# Now set it for the master.
with open('/sys/block/'+key+'/'+var, 'w') as f:
f.write(str(val))
@classmethod
def set_io_scheduler(cls, dev, sched):
"""Set the I/O scheduler"""
cls.set_io_variable(dev, 'queue/scheduler', sched)
@classmethod
def set_io_transfer_size(cls, dev, s):
"""Set the I/O transfer size to the device"""
cls.set_io_variable(dev, 'queue/max_sectors_kb', s)
@classmethod
def set_io_readahead_size(cls, dev, s):
"""Set the I/O readahead size to the device"""
cls.set_io_variable(dev, 'queue/read_ahead_kb', s)
@classmethod
def set_io_deadline_fifo_batch(cls, dev, s):
"""Set the deadline scheduler fifo batch size."""
cls.set_io_variable(dev, 'queue/iosched/fifo_batch', s)
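# Sketch of intended usage (assumes a Linux /sys/block layout and sufficient
# privileges; the device path is illustrative only):
#   BlockDeviceOps.set_io_scheduler('/dev/sda', 'deadline')
#   BlockDeviceOps.set_io_transfer_size('/dev/sda', 1024)   # queue/max_sectors_kb, in KiB
#   BlockDeviceOps.set_io_readahead_size('/dev/sda', 4096)  # queue/read_ahead_kb, in KiB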
################################################################
class TargetData(object):
"""Describe a target, typically a block device."""
@staticmethod
def build_parser(sp, cmdname):
p = sp.add_parser(cmdname, description='Define a target for testing.')
p.add_argument(
'target', type=str,
help='name of the target')
p.add_argument(
'device', type=str,
help='path to device')
p.add_argument(
'--size', type=Conversions.datasize2int, required=True,
help='size of the disk array')
p.add_argument(
'--count', type=int, default=1,
help='number of data disks in disk array')
# Stripe/segment size
g = p.add_mutually_exclusive_group(required=True)
g.add_argument(
'--segment', type=Conversions.datasize2int,
help='segment length on each disk of array')
g.add_argument(
'--stripe', type=Conversions.datasize2int,
help='stripe size of the array')
return p
def __init__(self, n):
self.target = n.target
self.device = n.device
self.dev_length = n.size
self.data_disks = n.count
if n.segment is not None:
self.segment_len = n.segment
self.stripe_len = n.count * n.segment
else:
self.stripe_len = n.stripe
self.segment_len = n.stripe / n.count
################################################################
class TestData(object):
"""Describe a collection of test parameters"""
@staticmethod
def build_parser(sp, cmdname):
p = sp.add_parser(cmdname, description='Define an i/o test')
p.add_argument(
'testname', type=str,
help='name of the test')
p.add_argument(
'--blocksize', type=Conversions.datasize2int,
default=Conversions.datasize2int('4MiB'),
help='File system block size')
p.add_argument(
'--misalignment', type=Conversions.datasize2int,
default=Conversions.datasize2int('0'),
help='IOP misalignment factor from start of block')
p.add_argument(
'--iop', type=Conversions.datasize2int,
default=Conversions.datasize2int('4MiB'),
help='Size of each IOP')
p.add_argument(
'--transfer', type=Conversions.datasize2int,
default=Conversions.datasize2int('2GiB'),
help='Total amount of data written during test')
p.add_argument(
'--scheduler', type=str,
choices=('noop', 'deadline', 'cfq', 'anticipatory'),
default='noop',
help='I/O elevator scheduler to use')
p.add_argument(
'--hwtransfer', type=Conversions.datasize2int,
help='Hardware transfer size to use for each IOP')
p.add_argument(
'--hwreadahead', type=Conversions.datasize2int,
help='Hardware readahead to use for each IOP')
return p
def __init__(self, n):
self.testname = n.testname
self.block_size = n.blocksize
self.misalignment = n.misalignment
self.iop_size = n.iop
self.transfer_size = n.transfer
self.scheduler = n.scheduler
self.hwtransfer = n.hwtransfer
self.hwreadahead = n.hwreadahead
################################################################
class HostData(object):
"""Describe a host and how to communicate with it."""
@staticmethod
def build_parser(sp, cmdname):
p = sp.add_parser(cmdname,
description='Describe a host for targets.')
p.add_argument(
'hostname', type=str,
help='name of the host')
p.add_argument(
'--workload', type=str, default=sys.argv[0],
help='location of workload script on remote system')
return p
def __init__(self, n):
self.hostname = n.hostname
self.workload = n.workload
################################################################
class BaseTestThread(multiprocessing.Process):
"""Base class for test threads."""
def __init__(self, ti, rank):
multiprocessing.Process.__init__(self)
self.daemon = True
self.ti = ti
self.rank = rank
self.d = os.open(self.ti.device, os.O_RDWR)
def wait_join(self):
"""Wait for the thread to stop, handling signals."""
while self.is_alive():
self.join(1)
class WriteTestThread(BaseTestThread):
"""Thread for testing writes."""
def run(self):
"""Run the thread's portion of the test."""
d = self.d
ti = self.ti
iop_size = ti.iop_size
loclist = ti.loclist
outfile = ti.outfile
stop_event = ti.stop_event
fails = 0
for i in range(self.rank, self.ti.iop_cnt, self.ti.wthreads):
if stop_event.is_set():
break
loc = loclist[i]
os.lseek(d, loc, os.SEEK_SET)
if os.write(d, ti.iop_bytes(loc)) < iop_size:
logging.warn('Short write!!! IOP #%i', i)
fails += 1
# We might want this to be configurable
if fails > 3:
break
if not stop_event.is_set():
os.fsync(d)
os.close(d)
class ReadTestThread(BaseTestThread):
"""Thread for testing reads."""
def run(self):
"""Run the thread's portion of the test."""
d = self.d
ti = self.ti
iop_size = ti.iop_size
loclist = ti.loclist
outfile = ti.outfile
stop_event = ti.stop_event
fails = 0
for i in range(self.rank, self.ti.iop_cnt, self.ti.rthreads):
if stop_event.is_set():
break
loc = loclist[i]
os.lseek(d, loc, os.SEEK_SET)
junk = os.read(d, iop_size)
if len(junk) < iop_size:
logging.warn('Short read!!! IOP %i', i)
fails += 1
if fails > 3:
break
continue
if junk != ti.iop_bytes(loc):
logging.warn('Read does not match! IOP # %i', i)
fails += 1
if fails > 3:
break
os.close(d)
class TestInstance(object):
"""Implement an I/O test instance."""
def __init__(self, test, target, outfile, wthreads, rthreads):
"""Build an instance that conducts the different phases of
testing."""
self.testname = test.testname
self.targetname = target.target
self.outfile = outfile
self.wthreads = wthreads
self.rthreads = rthreads
self.device = target.device
self.dev_length = target.dev_length
self.segment_len = target.segment_len
self.stripe_len = target.stripe_len
self.transfer_size = test.transfer_size
if test.block_size is not None:
self.block_size = test.block_size
else:
self.block_size = target.segment_len * target.data_disks
if test.iop_size is not None:
self.iop_size = test.iop_size
else:
self.iop_size = self.block_size
self.misalignment = test.misalignment
self.scheduler = test.scheduler
self.hwtransfer = test.hwtransfer
self.hwreadahead = test.hwreadahead
#
self.iop_cnt = self.transfer_size / self.iop_size
self.stop_event = None
def generate_random_bytes(self):
"""Generate and store some random data for writing."""
bytes = []
for i in range(self.iop_size + self.iop_cnt):
bytes.append( chr(random.randint(0,255)) )
bytes = ''.join(bytes)
self.bytes = bytes
def iop_bytes(self, loc):
"""Using the byte store, get bytes for this IOP.
We need to make sure that overlapping bytes will somehow get
the same pattern."""
iop_size = self.iop_size
bytes = self.bytes
bl = len(bytes)
block_size = self.block_size
res = ''
while len(res) < iop_size:
# Figure out what block we are in, and initial offset in block.
start = loc + len(res)
bn = start / block_size
offset = start % block_size
require = min( (iop_size - len(res)), (block_size-offset) )
# Figure out an initial offset in bytes, based on bn.
# This is a bit arbitrary.
bo = (37*bn + offset) % bl
# Build a result
ra = bytes[bo:bo + require]  # take `require` bytes starting at bo; wrap-around handled below
if len(ra) < require:
ra += bytes[0:require - len(ra)]
res += ra
return res
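# Note: with the wrap-around above, the byte produced for a given absolute
# device offset depends only on that offset (its block number plus in-block
# offset), so readers reconstruct the writer's pattern even when IOPs
# straddle block boundaries.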
def generate_random_positions(self):
"""Generate a set of positions for random I/O testing"""
dev_length = self.dev_length
iop_size = self.iop_size
iop_cnt = self.iop_cnt
block_size = self.block_size
misalignment = self.misalignment
# What is the maximum misalignment?
if misalignment > 0:
mmf = (block_size / misalignment)
else:
mmf = 0
max_misalignment = mmf*misalignment
#
max_block_loc = ((dev_length - iop_size - max_misalignment)
/ block_size) * block_size
# Get a collection of random locations for writing
loclist = random.sample(xrange(0,max_block_loc, block_size), iop_cnt)
if misalignment > 0:
for i in range(len(loclist)):
loclist[i] += random.randint(0,mmf)*misalignment
self.loclist = loclist
def prep_test(self):
self.generate_random_bytes()
self.generate_random_positions()
dev = self.device
BlockDeviceOps.set_io_scheduler(dev, self.scheduler)
if self.hwtransfer is not None:
BlockDeviceOps.set_io_transfer_size(dev, self.hwtransfer)
if self.hwreadahead is not None:
BlockDeviceOps.set_io_readahead_size(dev, self.hwreadahead)
def run_test(self):
# Get values
outfile = self.outfile
iop_size = self.iop_size
iop_cnt = self.iop_cnt
block_size = self.block_size
transfer_size = self.transfer_size
loclist = self.loclist
bytes = self.bytes
device = self.device
# Print some information about the test.
outfile.write('\n\nTest %s on target %s\n'
% (self.testname, self.targetname))
outfile.write('Date = %s\n' % ( time.ctime(), ))
outfile.write('Device = %s\n' % (device,))
outfile.write('iop_size = %s\n' % (Conversions.int2datasize(iop_size),))
outfile.write('block_size = %s\n'
% (Conversions.int2datasize(block_size),))
outfile.write('transfer_size = %s\n'
% (Conversions.int2datasize(transfer_size),))
outfile.write('segment_len = %s\n'
% (Conversions.int2datasize(self.segment_len),))
outfile.write('stripe_len = %s\n'
% (Conversions.int2datasize(self.stripe_len),))
outfile.write('misalignment = %s\n'
% (Conversions.int2datasize(self.misalignment),))
outfile.write('scheduler = %s\n' % (self.scheduler,))
if self.hwtransfer is None:
outfile.write('hwtransfer = \n')
else:
outfile.write('hwtransfer = %s\n'
% (Conversions.int2datasize(self.hwtransfer),))
if self.hwreadahead is None:
outfile.write('hwreadahead = \n')
else:
outfile.write('hwreadahead = %s\n'
% (Conversions.int2datasize(self.hwreadahead),))
outfile.write('wthreads = %d\n' % (self.wthreads,))
outfile.write('rthreads = %d\n' % (self.rthreads,))
outfile.write('Minimum seek = %s\n' % (min(loclist),))
outfile.write('Maximum seek = %s\n' % (max(loclist),))
outfile.flush()
# Set up event for interrupts.
self.stop_event = multiprocessing.Event()
self.def_handler = signal.getsignal(signal.SIGINT)
try:
signal.signal(signal.SIGINT, self.int_handler)
# Set up threads for writing.
writers = [ WriteTestThread(self, i) for i in range(self.wthreads) ]
# Perform writes.
startw = time.time()
for w in writers:
w.start()
for w in writers:
w.wait_join()
endw = time.time()
# Print results for writing
timeW = endw - startw
outfile.write('write time = %g seconds (%g MiB/sec)\n' % \
( timeW,
transfer_size/timeW/1000000 ) )
outfile.flush()
if self.stop_event.is_set():
return
# Set up threads for reading.
readers = [ ReadTestThread(self, i) for i in range(self.rthreads) ]
# Perform reads.
startr = time.time()
for r in readers:
r.start()
for r in readers:
r.wait_join()
endr = time.time()
# Print results for reading
timeR = endr - startr
outfile.write('read time = %g seconds (%g MiB/sec)\n' % \
( timeR,
transfer_size/timeR/1000000 ) )
outfile.flush()
return
finally:
signal.signal(signal.SIGINT, self.def_handler)
self.stop_event = None
def int_handler(self, signum, frame):
"""Handle keyboard interrupts."""
if self.stop_event is not None:
self.stop_event.set()
else:
self.def_handler(signum, frame)
################################################################
class RunTest(object):
"""Implement an invocation of a test (a test object applied to
data objects)."""
@staticmethod
def build_parser(cmdname):
p = argparse.ArgumentParser(
prog=cmdname,
description='Run I/O test.')
p.add_argument('test', type=str, help='Name of a test')
p.add_argument('target', type=str, help='Name of target')
p.add_argument('--wthreads', type=int, default=1,
help='Number of threads for writing')
p.add_argument('--rthreads', type=int, default=1,
help='Number of threads for reading')
return p
def __init__(self, n, targets, tests, outfile):
"""Initialize the object representing a test+target."""
self.usable = True
try:
test = tests[n.test]
except:
logging.error('Unknown test %s', n.test)
self.usable = False
try:
target = targets[n.target]
except:
logging.error('Unknown target %s', n.target)
self.usable = False
# Create the test object.
if self.usable:
self.ti = TestInstance(test, target, outfile,
n.wthreads, n.rthreads)
def run_test(self):
"""Run the test."""
if self.usable:
self.ti.prep_test()
self.ti.run_test()
################################################################
class IOTester(cmd.Cmd):
"""Command interpreter for the I/O workload tester.
This uses the Python cmd module to parse commands. We subvert it
a bit to work better from scripts, but prefer it to shlex since it
allows us to (potentially) provide CLI features such as command
completion.
The argparse module is used to parse arguments to individual
commands, as well as subcommands. It too is somewhat subverted to
work better with scripts."""
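# A hypothetical command file for this interpreter (device path and sizes are
# made up purely for illustration):
#   define target t0 /dev/sdb --size 100GiB --count 4 --segment 256KiB
#   define test seq4m --iop 4MiB --transfer 2GiB --scheduler deadline
#   run seq4m t0 --wthreads 4 --rthreads 4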
def __init__(self, cmdfile, outfile):
# Create parsers
ap = self.ap_parse_define = argparse.ArgumentParser(
prog='define',
description='Define an object for the I/O tester')
sp = ap.add_subparsers()
p = TargetData.build_parser(sp, 'target')
p.set_defaults(func=self._do_define_target)
p = TestData.build_parser(sp, 'test')
p.set_defaults(func=self._do_define_test)
p = HostData.build_parser(sp, 'host')
p.set_defaults(func=self._do_define_host)
self.ap_parse_run = RunTest.build_parser('run')
self._cmd = ''
cmd.Cmd.__init__(self, stdin=cmdfile)
if cmdfile != sys.stdin:
self.use_rawinput = False
self.prompt = ''
else:
self.prompt = 'iot: '
#
self.targets = {}
self.tests = {}
self.hosts = {}
self.outfile = outfile
def emptyline(self):
return False
def precmd(self, line):
# Help deal with continued lines
if line == '' or line[-1] != '\\':
# Command is finished here
res = self._cmd + line
self._cmd = ''
lres = res.lstrip()
if lres != '' and lres[0] == '#':
return ''
return res
if line != '':
# Continued command (ends with backslash)
self._cmd = self._cmd + line[:-1]
return ''
def postcmd(self, stop, line):
# Handle prompting for continued lines
if self.use_rawinput:
if self._cmd == '':
self.prompt = 'iot: '
else:
self.prompt = '____ '
return stop
# "exit"
def do_exit(self, cs):
return True
do_EOF = do_exit
def help_exit(self):
print 'usage: exit'
print ''
print 'Exit from iot'
help_EOF = help_exit
# "define"
def do_define(self, cs):
L = shlex.split(cs)
try:
n = self.ap_parse_define.parse_args(L)
except SystemExit as e:
return False
except:
return True
n.func(n)
return False
def help_define(self):
self.ap_parse_define.print_help()
def _do_define_target(self, n):
self.targets[n.target] = TargetData(n)
def _do_define_test(self, n):
self.tests[n.testname] = TestData(n)
def _do_define_host(self, n):
self.hosts[n.hostname] = HostData(n)
# "run"
def do_run(self, cs):
L = shlex.split(cs)
try:
n = self.ap_parse_run.parse_args(L)
except SystemExit as e:
return False
except:
return True
r = RunTest(n, self.targets, self.tests, self.outfile)
r.run_test()
return False
def help_run(self):
self.ap_parse_run.print_help()
def main():
# Set up logging.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# Define command line parser.
p = argparse.ArgumentParser(description='Test I/O device performance.')
p.add_argument('cmdfile', type=argparse.FileType('r'),
nargs='?', default='-')
p.add_argument('--output', type=argparse.FileType('w'), default='-',
help='File for output of test results')
n = p.parse_args()
t = IOTester(n.cmdfile, n.output)
t.cmdloop()
sys.exit(0)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a search campaign with the AdWords and Google Ads APIs.
This code example is the fifth in a series of code examples that shows how to
create a Search campaign using the AdWords API, and then migrate it to the
Google Ads API one functionality at a time. See other examples in this directory
for code examples in various stages of migration.
In this code example, the functionality to create a campaign budget and a search
campaign has been migrated to the Google Ads API. The rest of the functionality
- creating ad groups, keywords and expanded text ads - is done using the AdWords
API.
"""
import argparse
import datetime
import sys
import urllib.parse
import uuid
from googleads import adwords
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
# Number of ads being added/updated in this code example.
NUMBER_OF_ADS = 5
# The list of keywords being added in this code example.
KEYWORDS_TO_ADD = ["mars cruise", "space hotel"]
PAGE_SIZE = 1000
def _create_campaign_budget(client, customer_id):
"""Creates a new campaign budget and returns it.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
Returns:
A CampaignBudget.
"""
campaign_service = client.get_service("CampaignBudgetService")
operation = client.get_type("CampaignBudgetOperation")
criterion = operation.create
criterion.name = f"Interplanetary Cruise Budget {uuid.uuid4()}"
criterion.delivery_method = client.enums.BudgetDeliveryMethodEnum.STANDARD
criterion.amount_micros = 500000
try:
response = campaign_service.mutate_campaign_budgets(
customer_id=customer_id, operations=[operation]
)
campaign_budget_resource_name = response.results[0].resource_name
new_campaign_budget = _get_campaign_budget(
client, customer_id, campaign_budget_resource_name
)
print(f"Added budget named {new_campaign_budget.name}")
return new_campaign_budget
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _get_campaign_budget(client, customer_id, resource_name):
"""Retrieves the CampaignBudget associated with the given resource name.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
resource_name: (str) Resource name associated with the newly created
campaign.
Returns:
A CampaignBudget.
"""
ga_service = client.get_service("GoogleAdsService")
query = f'''
SELECT
campaign_budget.id,
campaign_budget.name,
campaign_budget.resource_name
FROM campaign_budget
WHERE campaign_budget.resource_name = "{resource_name}"'''
request = client.get_type("SearchGoogleAdsRequest")
request.customer_id = customer_id
request.query = query
request.page_size = PAGE_SIZE
try:
response = ga_service.search(request=request)
budget = list(response)[0].campaign_budget
return budget
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _create_campaign(client, customer_id, campaign_budget):
"""Creates a new campaign and returns it.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
campaign_budget: A CampaignBudget.
Returns:
A Campaign.
"""
operation = client.get_type("CampaignOperation")
campaign = operation.create
campaign_service = client.get_service("CampaignService")
campaign.name = f"Interplanetary Cruise#{uuid.uuid4()}"
campaign.advertising_channel_type = (
client.enums.AdvertisingChannelTypeEnum.SEARCH
)
# Recommendation: Set the campaign to PAUSED when creating it to stop the
# ads from immediately serving. Set to ENABLED once you've added
# targeting and the ads are ready to serve.
campaign.status = client.enums.CampaignStatusEnum.PAUSED
campaign.manual_cpc.enhanced_cpc_enabled = True
campaign.campaign_budget = campaign_budget.resource_name
campaign.network_settings.target_google_search = True
campaign.network_settings.target_search_network = True
campaign.network_settings.target_content_network = False
campaign.network_settings.target_partner_search_network = False
campaign.start_date = (
datetime.datetime.now() + datetime.timedelta(1)
).strftime("%Y%m%d")
campaign.end_date = (
datetime.datetime.now() + datetime.timedelta(365)
).strftime("%Y%m%d")
try:
response = campaign_service.mutate_campaigns(
customer_id=customer_id, operations=[operation]
)
campaign_resource_name = response.results[0].resource_name
new_campaign = _get_campaign(
client, customer_id, campaign_resource_name
)
print(f"Added campaign named {new_campaign.name}")
return new_campaign
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _get_campaign(client, customer_id, campaign_resource_name):
"""Retrieves the Campaign associated with the given resource name.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
campaign_resource_name: (str) Resource name associated with the newly
created campaign budget.
Returns:
A Campaign.
"""
ga_service = client.get_service("GoogleAdsService")
query = f'''
SELECT
campaign.id,
campaign.name,
campaign.resource_name
FROM campaign
WHERE campaign.resource_name = "{campaign_resource_name}"'''
request = client.get_type("SearchGoogleAdsRequest")
request.customer_id = customer_id
request.query = query
request.page_size = PAGE_SIZE
try:
response = ga_service.search(request=request)
campaign = list(response)[0].campaign
return campaign
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _create_ad_group(client, customer_id, campaign):
"""Creates a new ad group and returns it.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
campaign: A Campaign.
Returns:
An AdGroup.
"""
operation = client.get_type("AdGroupOperation")
adgroup = operation.create
adgroup_service = client.get_service("AdGroupService")
adgroup.name = f"Earth to Mars Cruises #{uuid.uuid4()}"
adgroup.campaign = campaign.resource_name
adgroup.status = client.enums.AdGroupStatusEnum.ENABLED
adgroup.type = client.enums.AdGroupTypeEnum.SEARCH_STANDARD
adgroup.cpc_bid_micros = 10000000
try:
response = adgroup_service.mutate_ad_groups(
customer_id=customer_id, operations=[operation]
)
ad_group_resource_name = response.results[0].resource_name
ad_group = _get_ad_group(client, customer_id, ad_group_resource_name)
print(f"Added AdGroup named {ad_group.name}")
return ad_group
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _get_ad_group(client, customer_id, ad_group_resource_name):
"""Retrieves an AdGroup associated with the given resource name.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
ad_group_resource_name: (str) Resource name associated with the newly
created Ad group.
Returns:
An AdGroup.
"""
ga_service = client.get_service("GoogleAdsService")
query = f'''
SELECT
ad_group.id,
ad_group.name,
ad_group.resource_name
FROM ad_group
WHERE ad_group.resource_name = "{ad_group_resource_name}"'''
request = client.get_type("SearchGoogleAdsRequest")
request.customer_id = customer_id
request.query = query
request.page_size = PAGE_SIZE
try:
response = ga_service.search(request=request)
adGroup = list(response)[0].ad_group
return adGroup
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _create_text_ads(client, customer_id, ad_group):
"""Creates new text ads in a given ad group.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
ad_group: A AdGroup instance.
"""
adgroup_service = client.get_service("AdGroupAdService")
operations = []
for i in range(0, NUMBER_OF_ADS):
operation = client.get_type("AdGroupAdOperation")
ad_group_operation = operation.create
ad_group_operation.ad_group = ad_group.resource_name
ad_group_operation.status = client.enums.AdGroupAdStatusEnum.PAUSED
ad_group_operation.ad.expanded_text_ad.headline_part1 = (
f"Cruise to Mars #{str(uuid.uuid4())[:4]}"
)
ad_group_operation.ad.expanded_text_ad.headline_part2 = (
"Best Space Cruise Line"
)
ad_group_operation.ad.expanded_text_ad.description = (
"Buy your tickets now!"
)
ad_group_operation.ad.final_urls.append("http://www.example.com")
operations.append(operation)
try:
ad_group_ad_response = adgroup_service.mutate_ad_group_ads(
customer_id=customer_id, operations=operations
)
new_ad_resource_names = [
row.resource_name for row in ad_group_ad_response.results
]
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
new_ads = _get_ads(client, customer_id, new_ad_resource_names)
for new_ad in new_ads:
print(
f"Created expanded text ad with ID {new_ad.ad.id}, status "
f"{new_ad.status} and headline "
f"{new_ad.ad.expanded_text_ad.headline_part1}."
f"{new_ad.ad.expanded_text_ad.headline_part2}"
)
def _get_ads(client, customer_id, new_ad_resource_names):
"""Retrieves a list of AdGroupAds.
Args:
client: A GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
new_ad_resource_names: (list) Resource names of the newly created ads.
Returns:
A list of AdGroupAds.
"""
def _formatter(given_string):
"""Assigns ' ' to names of resources.
This produces a formatted string that can be used within an IN clause.
Args:
given_string: (list) The resource name strings to be formatted.
Returns:
The formatted string.
"""
results = []
for i in given_string:
results.append(repr(i))
return ",".join(results)
resource_names = _formatter(new_ad_resource_names)
ga_service = client.get_service("GoogleAdsService")
query = f"""
SELECT
ad_group_ad.ad.id,
ad_group_ad.ad.expanded_text_ad.headline_part1,
ad_group_ad.ad.expanded_text_ad.headline_part2,
ad_group_ad.status,
ad_group_ad.ad.final_urls,
ad_group_ad.resource_name
FROM ad_group_ad
WHERE ad_group_ad.resource_name in ({resource_names})"""
request = client.get_type("SearchGoogleAdsRequest")
request.customer_id = customer_id
request.query = query
request.page_size = PAGE_SIZE
try:
response = ga_service.search(request=request)
return [row.ad_group_ad for row in response.results]
except GoogleAdsException as ex:
_handle_googleads_exception(ex)
def _create_keywords(client, ad_group_id, keywords_to_add):
"""Populates keywords on a given ad group ID.
Args:
client: An instance of the googleads.adwords.AdWordsClient class.
ad_group_id: (str) ad group ID to be referenced while creating text ads.
keywords_to_add: (list) A list of keywords to be added to a given ad
group.
"""
ad_group_criterion_service = client.GetService(
"AdGroupCriterionService", "v201809"
)
operations = []
for keyword in keywords_to_add:
operation = {
"xsi_type": "BiddableAdGroupCriterion",
"adGroupId": ad_group_id,
"criterion": {
"xsi_type": "Keyword",
"text": keyword,
"matchType": "BROAD",
},
"userStatus": "PAUSED",
"finalUrls": [
(
"http://www.example.com/mars/cruise/"
f"?kw={urllib.parse.quote(keyword)}"
)
],
}
create_keyword = {"operator": "ADD", "operand": operation}
operations.append(create_keyword)
results = ad_group_criterion_service.mutate(operations)
for result in results["value"]:
print(
f'Keyword with ad group ID {result["adGroupId"]}, keyword ID '
f'{result["criterion"]["id"]}, text '
f'{result["criterion"]["text"]} and match'
f'type {result["criterion"]["matchType"]} was created'
)
def _handle_googleads_exception(exception):
print(
f'Request with ID "{exception.request_id}" failed with status '
f'"{exception.error.code().name}" and includes the following errors:'
)
for error in exception.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
if __name__ == "__main__":
# Initialize client object.
# It will read the config file. The default file path is the Home Directory.
googleads_client = GoogleAdsClient.load_from_storage(version="v10")
adwords_client = adwords.AdWordsClient.LoadFromStorage()
parser = argparse.ArgumentParser(
description="Lists all campaigns for specified customer."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
args = parser.parse_args()
budget = _create_campaign_budget(googleads_client, args.customer_id)
campaign = _create_campaign(googleads_client, args.customer_id, budget)
ad_group = _create_ad_group(googleads_client, args.customer_id, campaign)
_create_text_ads(googleads_client, args.customer_id, ad_group)
_create_keywords(adwords_client, ad_group.id, KEYWORDS_TO_ADD)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, f, ci, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `3`.
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
  cell_size = cell_size4 // 4  # integer division so the shape stays an int under __future__ division
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wco, wcf, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wco, wcf, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad,
b_grad) = gen_lstm_ops.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wco,
wcf,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad,
wcf_grad, b_grad]
class LSTMBlockCell(core_rnn_cell.RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting at the beginning of training.
Unlike `core_rnn_cell.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_peephole=False):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._names = {
"W": "weights",
"b": "biases",
"wci": "w_i_diag",
"wco": "w_o_diag",
"wcf": "w_f_diag",
"scope": "lstm_cell"
}
@property
def state_size(self):
return core_rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, x, states_prev, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or self._names["scope"]):
x_shape = x.get_shape().with_rank(2)
if not x_shape[1].value:
raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
if len(states_prev) != 2:
raise ValueError("Expecting states_prev to be a tuple with length 2.")
input_size = x_shape[1].value
w = vs.get_variable(self._names["W"], [input_size + self._num_units,
self._num_units * 4])
b = vs.get_variable(
self._names["b"], [w.get_shape().with_rank(2)[1].value],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
wci = vs.get_variable(self._names["wci"], [self._num_units])
wco = vs.get_variable(self._names["wco"], [self._num_units])
wcf = vs.get_variable(self._names["wcf"], [self._num_units])
else:
wci = wco = wcf = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = states_prev
(_, cs, _, _, _, _, h) = _lstm_block_cell(
x,
cs_prev,
h_prev,
w,
b,
wci=wci,
wco=wco,
wcf=wcf,
forget_bias=self._forget_bias,
use_peephole=self._use_peephole)
new_state = core_rnn_cell.LSTMStateTuple(cs, h)
return h, new_state
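# Hedged usage sketch (added for illustration; not part of the original module).
# `tf` is assumed to be TensorFlow 1.x, where tf.contrib.rnn lives; the helper is
# never called here, it only shows how LSTMBlockCell is driven one step at a time.
def _lstm_block_cell_usage_sketch():
  import tensorflow as tf
  batch_size, input_size, num_units = 2, 3, 4
  x = tf.placeholder(tf.float32, [batch_size, input_size])
  cell = LSTMBlockCell(num_units, forget_bias=1.0)
  zero = tf.zeros([batch_size, num_units])
  state = core_rnn_cell.LSTMStateTuple(zero, zero)
  h, new_state = cell(x, state)
  # h: [batch_size, num_units]; new_state is an LSTMStateTuple of (cs, h).
  return h, new_state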
class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
"""This is a helper class that provides housekeeping for LSTM cells.
  This may be useful for alternative LSTMs and similar types of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
scope: `VariableScope` for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
with vs.variable_scope(scope or "lstm_block_wrapper"):
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" %
inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError(
"Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(inputs, initial_cell_state,
initial_output, dtype,
sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(
sequence_length, time_len, dtype=dtype), [1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
        # correctly, since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = core_rnn_cell.LSTMStateTuple(final_cell_state,
final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
  This is an extremely efficient LSTM implementation that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting at the beginning of training.
The variable naming is consistent with `core_rnn_cell.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Defaults to `3`.
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip
self._use_peephole = use_peephole
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
input_size = inputs_shape[2].value
w = vs.get_variable(
"weights",
[input_size + self._num_units, self._num_units * 4], dtype=dtype)
b = vs.get_variable(
"biases", [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0),
dtype=dtype)
if self._use_peephole:
wci = vs.get_variable("w_i_diag", [self._num_units], dtype=dtype)
wco = vs.get_variable("w_o_diag", [self._num_units], dtype=dtype)
wcf = vs.get_variable("w_f_diag", [self._num_units], dtype=dtype)
else:
wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
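# Hedged usage sketch (added for illustration; not part of the original module).
# `tf` is assumed to be TensorFlow 1.x; LSTMBlockFusedCell consumes a time-major
# [time_len, batch_size, input_size] tensor in a single fused op.
def _lstm_block_fused_cell_usage_sketch():
  import tensorflow as tf
  time_len, batch_size, input_size, num_units = 5, 2, 3, 4
  inputs = tf.placeholder(tf.float32, [time_len, batch_size, input_size])
  cell = LSTMBlockFusedCell(num_units, forget_bias=1.0, use_peephole=False)
  outputs, (final_c, final_h) = cell(inputs, dtype=tf.float32)
  # outputs: [time_len, batch_size, num_units]; the final state is an
  # LSTMStateTuple of (cell state, output) taken at the last valid step.
  return outputs, final_c, final_h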
|
|
from unittest import TestCase
import os
from zope.interface import verify
from filesystems import exceptions, interfaces
from filesystems._path import Path, RelativePath
class TestPath(TestCase):
def test_div(self):
self.assertEqual(Path("a") / "b" / "c", Path("a", "b", "c"))
def test_div_nonsense(self):
with self.assertRaises(TypeError):
Path("a") / object()
def test_descendant(self):
self.assertEqual(Path("a", "b").descendant("c"), Path("a", "b", "c"))
def test_multi_descendant(self):
self.assertEqual(Path("a").descendant("b", "c"), Path("a", "b", "c"))
def test_parent(self):
self.assertEqual(Path("a", "b").parent(), Path("a"))
def test_parent_of_root(self):
self.assertEqual(Path.root().parent(), Path.root())
def test_sibling(self):
self.assertEqual(Path("a", "b").sibling("c"), Path("a", "c"))
def test_sibling_of_root(self):
with self.assertRaises(ValueError):
Path.root().sibling("c")
def test_relative_to(self):
self.assertEqual(
Path("a", "b", "c").relative_to(Path("d", "e")),
Path("a", "b", "c"),
)
def test_heritage(self):
self.assertEqual(
list(Path("a", "b", "c", "d").heritage()), [
Path("a"),
Path("a", "b"),
Path("a", "b", "c"),
Path("a", "b", "c", "d"),
],
)
def test_from_string(self):
self.assertEqual(
Path.from_string(os.sep + os.sep.join("abc")),
Path("a", "b", "c"),
)
def test_from_string_relative(self):
self.assertEqual(
Path.from_string(os.sep.join("abc")),
RelativePath("a", "b", "c"),
)
def test_from_string_trailing_slash(self):
self.assertEqual(
Path.from_string(os.sep + os.sep.join("ab") + os.sep),
Path("a", "b"),
)
# Not sure this is better than only stripping one, but it's easier to
# do, so we start with this.
def test_from_string_multiple_trailing_slashes(self):
self.assertEqual(
Path.from_string(os.sep + os.sep.join("ab") + os.sep + os.sep),
Path("a", "b"),
)
def test_from_string_repeated_separator(self):
self.assertEqual(
Path.from_string(
(
os.sep * 3 +
"a" +
os.sep * 2 +
"b" +
os.sep +
"c"
),
),
Path("", "", "a", "", "b", "c"),
)
def test_from_string_relative_repeated_separator(self):
self.assertEqual(
Path.from_string("a" + os.sep * 3 + "b" + os.sep * 2 + "c"),
RelativePath("a", "", "", "b", "", "c"),
)
def test_from_string_parent(self):
self.assertEqual(
Path.from_string(
(
os.pardir +
os.sep +
"a" +
os.sep +
"b" +
os.sep +
os.pardir +
os.sep +
"b"
),
),
RelativePath(os.pardir, "a", "b", os.pardir, "b"),
)
def test_from_empty_string(self):
with self.assertRaises(exceptions.InvalidPath):
Path.from_string("")
def test_str(self):
self.assertEqual(
str(Path.from_string(os.sep + os.sep.join("abc"))),
os.sep + os.sep.join("abc"),
)
def test_cwd(self):
self.assertEqual(Path.cwd(), Path.from_string(os.getcwd()))
def test_cwd_is_absolute(self):
self.assertEqual(Path.cwd().relative_to(Path.root()), Path.cwd())
def test_root(self):
self.assertEqual(Path.root(), Path())
def test_root_heritage(self):
self.assertEqual(list(Path.root().heritage()), [Path.root()])
def test_basename(self):
self.assertEqual(Path("a", "b").basename(), "b")
def test_root_basename(self):
self.assertEqual(Path().basename(), "")
def test_dirname(self):
self.assertEqual(
Path("a", "b", "c").dirname(),
os.path.join(os.sep, "a", "b"),
)
def test_root_dirname(self):
self.assertEqual(Path().dirname(), os.sep)
def test_repr(self):
self.assertEqual(
repr(Path("a", "b", "c")),
"<Path /a/b/c>"
)
def test_expanded(self):
self.assertEqual(
Path.expanded("~/foo/~/bar"),
Path.from_string(os.path.expanduser("~/foo/~/bar")),
)
def test_interface(self):
verify.verifyClass(interfaces.Path, Path)
def test_is_pathlike(self):
self.assertEqual(
os.fspath(Path.from_string(os.sep + os.sep.join("abc"))),
os.sep + os.sep.join("abc"),
)
class TestRelativePath(TestCase):
def test_div(self):
self.assertEqual(
RelativePath("a") / "b" / "c",
RelativePath("a", "b", "c"),
)
def test_div_nonsense(self):
with self.assertRaises(TypeError):
RelativePath("a") / object()
def test_relative_to(self):
self.assertEqual(
RelativePath("a", "b", "c").relative_to(Path("d", "e")),
Path("d", "e", "a", "b", "c"),
)
def test_str(self):
self.assertEqual(
str(RelativePath("a", "b", "c")), os.path.join("a", "b", "c"),
)
def test_repr(self):
self.assertEqual(
repr(RelativePath("a", "b", "c")),
"<Path a/b/c>"
)
def test_basename(self):
self.assertEqual(RelativePath("a", "b").basename(), "b")
def test_dirname(self):
self.assertEqual(
RelativePath("a", "b", "c").dirname(),
os.path.join("a", "b"),
)
def test_parent(self):
self.assertEqual(RelativePath("a", "b").parent(), RelativePath("a"))
def test_heritage(self):
self.assertEqual(
list(RelativePath("a", "b", "c", "d").heritage()), [
RelativePath("a"),
RelativePath("a", "b"),
RelativePath("a", "b", "c"),
RelativePath("a", "b", "c", "d"),
],
)
def test_sibling(self):
self.assertEqual(
RelativePath("a", "b").sibling("c"),
RelativePath("a", "c"),
)
def test_descendant(self):
self.assertEqual(
RelativePath("a", "b").descendant("c"),
RelativePath("a", "b", "c"),
)
def test_multi_descendant(self):
self.assertEqual(
RelativePath("a").descendant("b", "c"),
RelativePath("a", "b", "c"),
)
def test_is_pathlike(self):
self.assertEqual(
os.fspath(RelativePath("a", "b", "c")),
os.path.join("a", "b", "c"),
)
def test_interface(self):
verify.verifyClass(interfaces.Path, RelativePath)
|
|
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
from vmtk import vtkvmtk
import argparse
import itertools
import os
def close_cell(section):
    # assumes the cell array consists of line cells
section.BuildCells()
section.BuildLinks()
numberOfLinePoints = section.GetNumberOfPoints()
cell_ids = vtk.vtkIdList()
numberOfSingleCellPoints = 0
termination_pts = []
for i in range(section.GetNumberOfPoints()):
section.GetPointCells(i,cell_ids)
if(cell_ids.GetNumberOfIds() == 1):
numberOfSingleCellPoints += 1
termination_pts.append(i)
if(numberOfSingleCellPoints == 2):
print(termination_pts)
line = vtk.vtkLine()
line.GetPointIds().SetId(0, termination_pts[0])
line.GetPointIds().SetId(1, termination_pts[1])
section.GetLines().InsertNextCell(line)
elif(numberOfSingleCellPoints > 2):
print("disconnected section")
def ComputePolygonArea(section):
# calculate area of closed polygon
section.BuildCells()
section_area = 0.0
area_calc = 0.0
if (section.GetNumberOfCells() == 0):
print("shwarma")
return section_area
elif (section.GetNumberOfCells() > 1):
print("there should only be one cell")
trianglePointIds = vtk.vtkIdList()
points_list = vtk.vtkPoints()
for j in range(section.GetNumberOfCells()):
area_calc = 0.0
cell = section.GetCell(j)
if ( cell.GetCellType() != vtk.VTK_POLYGON ):
print(cell.GetCellType())
continue
#cell.Triangulate(j, trianglePointIds, points_list)
cell.Triangulate(trianglePointIds)
numberOfTriangles = trianglePointIds.GetNumberOfIds() // 3
#print("triangles", numberOfTriangles)
point0 = [0.0,0.0,0.0]
point1 = [0.0,0.0,0.0]
point2 = [0.0,0.0,0.0]
for i in range(numberOfTriangles):
pointId0 = trianglePointIds.GetId(3*i)
pointId1 = trianglePointIds.GetId(3*i+1)
pointId2 = trianglePointIds.GetId(3*i+2)
cell.GetPoints().GetPoint(pointId0, point0)
cell.GetPoints().GetPoint(pointId1, point1)
cell.GetPoints().GetPoint(pointId2, point2)
area_calc += vtk.vtkTriangle.TriangleArea(point0,point1,point2)
section_area = max(area_calc, section_area)
return section_area
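def _compute_polygon_area_sketch():
    # Hedged sketch (added for illustration; not part of the original script):
    # builds a single convex 5-point VTK_POLYGON and checks that
    # ComputePolygonArea() returns roughly its shoelace area of 1.25.
    pts = vtk.vtkPoints()
    for x, y in [(0.0, 0.0), (1.0, 0.0), (1.5, 0.5), (1.0, 1.0), (0.0, 1.0)]:
        pts.InsertNextPoint(x, y, 0.0)
    polygon = vtk.vtkPolygon()
    polygon.GetPointIds().SetNumberOfIds(5)
    for i in range(5):
        polygon.GetPointIds().SetId(i, i)
    polys = vtk.vtkCellArray()
    polys.InsertNextCell(polygon)
    section = vtk.vtkPolyData()
    section.SetPoints(pts)
    section.SetPolys(polys)
    print(ComputePolygonArea(section))  # expected to be close to 1.25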
def ComputeBranchSectionShape(section, origin):
# eccentricity of slice
pointIds = vtk.vtkIdList()
for j in range(section.GetNumberOfCells()):
area_calc = 0.0
cell = section.GetCell(j)
if ( cell.GetCellType() != vtk.VTK_POLYGON ):
print(cell.GetCellType())
continue
center = [0.0,0.0,0.0]
for i in range(cell.GetNumberOfPoints()):
pt = section.GetPoint(cell.GetPointIds().GetId(i))
center = [p+c for p,c in zip(pt, center)]
center = [p/cell.GetNumberOfPoints() for p in center]
diff_origin = (vtk.vtkMath.Distance2BetweenPoints(center, origin))**0.5
rad_list = []
for i in range(cell.GetNumberOfPoints()):
pt = section.GetPoint(cell.GetPointIds().GetId(i))
radius = (vtk.vtkMath.Distance2BetweenPoints(origin, pt))**0.5
rad_list.append(radius)
mean = np.mean(rad_list)
stddev = np.std(rad_list)
shape = min(rad_list)/max(rad_list)
cv = (1.0 + 1.0/(4.0*cell.GetNumberOfPoints()))*stddev/mean
offset = diff_origin/mean
#print(mean, stddev, cv, offset, shape)
return cv, offset, shape
# get the average radius for each segment
def Execute(args):
print("evaluate centerlines")
reader_ctr = vmtkscripts.vmtkSurfaceReader()
reader_ctr.InputFileName = args.centerlines
reader_ctr.Execute()
print(args.clean_ctr)
if(args.clean_ctr):
cleaner = vtk.vtkCleanPolyData()
cleaner.PointMergingOn()
cleaner.SetInputData(reader_ctr.Surface)
cleaner.Update()
centerlines = cleaner.GetOutput()
else:
centerlines = reader_ctr.Surface
centerlines.BuildLinks()
centerlines.BuildCells()
reader_br = vmtkscripts.vmtkSurfaceReader()
reader_br.InputFileName = args.surface
reader_br.Execute()
#if (reader_br.Surface.GetPointData().GetNormals() == None):
#normalsFilter = vmtkscripts.vmtkSurfaceNormals()
#normalsFilter.ComputeCellNormals = 1
#normalsFilter.Surface = reader_br.Surface
#normalsFilter.NormalsArrayName = 'Normals'
#normalsFilter.Execute()
#surface_reference = normalsFilter.Surface
#else:
surface_reference = reader_br.Surface
locator_surf = vtk.vtkPointLocator()
locator_surf.SetDataSet(surface_reference)
locator_surf.BuildLocator()
locator_cell = vtk.vtkCellLocator()
locator_cell.SetDataSet(surface_reference)
locator_cell.BuildLocator()
cell_Ids = vtk.vtkIdList()
outputLines = vtk.vtkCellArray()
output = vtk.vtkPolyData()
triangles = vtk.vtkCellArray()
triangle_pd = vtk.vtkPolyData()
triangle_pts = vtk.vtkPoints()
lengthArray = vtk.vtkDoubleArray()
lengthArray.SetName("length")
lengthArray.SetNumberOfComponents(1)
pts_ids = vtk.vtkIdList()
factor = 1.0
factor2 = 2.0
pd_count = 0
size_range = [0.0, 0.0]
bifurcation_info = {}
for i in range(centerlines.GetNumberOfCells()):
bifurcation_info[i] = {"clip_id": [], "cell_id": []}
cell = centerlines.GetCell(i)
if cell.GetCellType() not in (vtk.VTK_POLY_LINE, vtk.VTK_LINE):
continue
n_cell_pts = cell.GetNumberOfPoints()
start_end_pt = [0, n_cell_pts-1]
for j in start_end_pt:
pt_id_pd = cell.GetPointIds().GetId(j)
centerlines.GetPointCells(pt_id_pd, cell_Ids)
if (cell_Ids.GetNumberOfIds() > 1):
radius = centerlines.GetPointData().GetArray("MaximumInscribedSphereRadius").GetTuple(pt_id_pd)[0]
length = 0.0
radius2 = 0.0
prev_point = centerlines.GetPoint(pt_id_pd)
if( j == start_end_pt[0]):
step = 1
stop = start_end_pt[-1]
else:
step = -1
stop = -1
for k in range(j, stop, step):
point = centerlines.GetPoint(cell.GetPointIds().GetId(k))
length += vtk.vtkMath.Distance2BetweenPoints(prev_point,point)**0.5
prev_point = point
if (length > (factor*radius + factor2*radius2)):
#print(length)
pl_vec = centerlines.GetPointData().GetArray("FrenetTangent").GetTuple(cell.GetPointIds().GetId(k))
pl = vtk.vtkPlane()
pl.SetOrigin(point)
pl.SetNormal(pl_vec)
cut = vtk.vtkCutter()
cut.SetInputData(surface_reference)
cut.SetCutFunction(pl)
cut.Update()
ex = vtk.vtkPolyDataConnectivityFilter()
ex.SetInputConnection(cut.GetOutputPort())
#ex.SetExtractionModeToAllRegions()
ex.SetExtractionModeToClosestPointRegion()
ex.SetClosestPoint(point)
ex.Update()
lp = ex.GetOutput()
close_cell(lp)
cutStrips = vtk.vtkStripper() # Forms loops (closed polylines) from cutter
cutStrips.SetInputData(lp)
cutStrips.Update()
cutPoly = vtk.vtkPolyData() # This trick defines polygons as polyline loop
cutPoly.SetPoints((cutStrips.GetOutput()).GetPoints())
cutPoly.SetPolys((cutStrips.GetOutput()).GetLines())
area_test = ComputePolygonArea(cutPoly)
size_ratio = area_test/(np.pi*radius**2)
#print(area_test, radius, size_ratio)
if(size_ratio > 2.0 ):
continue
cv, offset, shape = ComputeBranchSectionShape(cutPoly, point)
if(cv > 0.2):
continue
if(offset > 0.10):
continue
#if(shape > 0.8):
# continue
#else:
#average area
#radius2 = max(radius, np.sqrt(area_test/np.pi))
#shape = ComputeBranchSectionShape(cutPoly, point, size_range)
writerline = vmtkscripts.vmtkSurfaceWriter()
writerline.OutputFileName = "test_loop_{0}.vtp".format(pd_count)
writerline.Input = cutPoly #ex.GetOutput()
writerline.Execute()
pd_count += 1
#if (radius2 <= 0.0):
#radius2 = centerlines.GetPointData().GetArray("MaximumInscribedSphereRadius").GetTuple(cell.GetPointIds().GetId(k))[0]
##if ( radius2 > radius):
##radius = radius2
##else:
##ratio = radius/radius2
#else:
#print(length)
clip_id = cell.GetPointIds().GetId(k)
bifurcation_info[i]["clip_id"].append(clip_id)
bifurcation_info[i]["cell_id"].append(k)
break
#return
#t = [ 1 for i in bifurcation_info.keys() if len(bifurcation_info[i]) == 2]
two_bif = False
pd_count = 0
for cell in bifurcation_info:
id_sorted = sorted(bifurcation_info[cell]["cell_id"])
if (len(bifurcation_info[cell]["cell_id"]) < 2):
two_bif = False
else:
two_bif = True
diff = bifurcation_info[cell]["cell_id"][0] - bifurcation_info[cell]["cell_id"][1]
            if(abs(diff) < 2): # there is a problem if there are fewer than two points
print("houston we got a problem")
clip_id = centerlines.GetCell(cell).GetPointIds().GetId(id_sorted[0])
clip_id_m1 = centerlines.GetCell(cell).GetPointIds().GetId(id_sorted[0]+1)
start_pt = centerlines.GetPoint(clip_id)
surface_pt_id = locator_surf.FindClosestPoint(start_pt)
# vector from pt(start_pt+1) - pt(start_pt)
v_start = [ x - y for x,y in zip(centerlines.GetPoint(clip_id_m1), start_pt)]
v_ctr_start = centerlines.GetPointData().GetArray("FrenetTangent").GetTuple(clip_id)
v_normal_start = centerlines.GetPointData().GetArray("FrenetNormal").GetTuple(clip_id)
# want inward facing normals
if (vtk.vtkMath.Dot(v_start, v_ctr_start) < 0.0):
v_ctr_start = [-1.0*x for x in v_ctr_start]
#print(clip_tangent)
plane1 = vtk.vtkPlane()
plane1.SetOrigin(start_pt)
plane1.SetNormal(v_ctr_start)
#tree = vtk.vtkModifiedBSPTree()
#tree.SetDataSet(surface_reference)
#tree.BuildLocator()
##intersect the locator with the line
#LineP0 = start_pt
## 200 points
#radius_est = centerlines.GetPointData().GetArray("MaximumInscribedSphereRadius").GetTuple(clip_id)[0]
##radii increment is proportional to circumference
##distance between points
#cnt_dist = 0.05
#n_radii = int(np.pi*2.0*radius_est/cnt_dist)
#dt = radius_est*4.0*cnt_dist #estimate ray step from radius
#dtheta = [0.0 + i*(359.0-0.0)/(n_radii-1) for i in range(n_radii)] #[0.0]
#out_vector = (0.0,0.0,0.0)
#tolerance = 0.0000001
#polylines = vtk.vtkCellArray()
#cut_surface = vtk.vtkPolyData()
#new_line = vtk.vtkPolyLine()
#new_line.GetPointIds().SetNumberOfIds(len(dtheta)+1)
#IntersectPointsList = vtk.vtkPoints()
#loop_pts_list = vtk.vtkPoints()
#IntersectCellsList = vtk.vtkIdList()
#for idx, theta in enumerate(dtheta):
#IntersectPoints = vtk.vtkPoints()
#IntersectCells = vtk.vtkIdList()
#code = 0
#count = 1
#rotate = vtk.vtkTransform()
#rotate.RotateWXYZ(theta, v_ctr_start)
#rotate.Update()
##print(dir(rotate))
##trans_m = vtk.vtkMatrix4x4()
##rotate.GetMatrix(trans_m)
#out_vector = rotate.TransformVector(v_normal_start)
#LineP1 = [ c2 + count*dt*c1 for c2, c1 in zip(start_pt, out_vector)]
##print(v_normal_start, out_vector)
#while ( code == 0 and count < 10000):
#count += 1
#code = tree.IntersectWithLine(LineP0, LineP1,
#tolerance, IntersectPoints,
#IntersectCells)
#LineP1 = [ c2 + count*dt*c1 for c2, c1 in zip(start_pt, out_vector)]
#if(count > 10000 and code == 0):
#print("no intersection")
#continue
#if (code != 0):
#if(IntersectCells.GetNumberOfIds() > 1):
#print(IntersectCells.GetNumberOfIds())
#pt = IntersectPoints.GetPoint(0)
##pt = [ c2 + dt*c1 for c2, c1 in zip(pt, out_vector)] # add some buffer, may not need it
#new_pt_id = IntersectPointsList.InsertNextPoint(pt)
#new_line.GetPointIds().SetId(idx, new_pt_id)
#loop_pts_list.InsertNextPoint(LineP1)
#IntersectCellsList.InsertNextId(IntersectCells.GetId(0))
##print(IntersectPoints.GetPoint(0), IntersectCells.GetId(0) )
#new_line.GetPointIds().SetId(len(dtheta), 0)
#print(IntersectPointsList.GetPoint(0))
#print(v_ctr_start, start_pt)
#polylines.InsertNextCell(new_line)
#cut_surface.SetPoints(IntersectPointsList)
#cut_surface.SetLines(polylines)
#writerline = vmtkscripts.vmtkSurfaceWriter()
#writerline.OutputFileName = "test_loop_{0}.vtp".format(pd_count)
#writerline.Input = cut_surface
#writerline.Execute()
cutter = vtk.vtkCutter()
cutter.SetInputData(surface_reference)
cutter.SetCutFunction(plane1)
cutter.Update()
extract = vtk.vtkPolyDataConnectivityFilter()
extract.SetInputConnection(cutter.GetOutputPort())
extract.SetExtractionModeToClosestPointRegion()
extract.SetClosestPoint(start_pt)
extract.Update()
loop = extract.GetOutput()
weights = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
gencell = vtk.vtkGenericCell()
cross_inter = [0.0,0.0,0.0]
cross_edges = [0.0,0.0,0.0]
cross_test = [0.0,0.0,0.0]
test_pt = [0.0,0.0,0.0]
thresh = 0.0
first_3tris = False
for i in range(loop.GetNumberOfCells()):
pt1 = loop.GetPoint(loop.GetCell(i).GetPointIds().GetId(0))
pt2 = loop.GetPoint(loop.GetCell(i).GetPointIds().GetId(1))
mid_pt = [ (x+y)/2.0 for x,y in zip(pt2,pt1)]
direction = [ x-y for x,y in zip(pt2,pt1)]
cell_id = locator_cell.FindCell(mid_pt, 0.0001, gencell, test_pt, weights)
cell_ = surface_reference.GetCell(cell_id)
right = []
left = []
center = []
pt_list = []
for j in range(cell_.GetNumberOfPoints()):
#get distance
pt_list.append(surface_reference.GetPoint(cell_.GetPointIds().GetId(j)))
dist = plane1.EvaluateFunction(pt_list[-1])
if ( dist < -thresh):
left.append(j)
elif (dist > thresh):
right.append(j)
else:
center.append(j)
tag = ""
if len(center) > 1:
# don't do anything its already split on edge
tag = "edge"
print("edge")
elif len(center) > 0:
# split into two triangles
pt = center[0]
tag = "2_tris"
else:
tag = "3_tris"
if (len(left) > 1):
#print("left")
pt = right[0]
elif (len(right) > 1):
pt = left[0]
else:
print("split triangle")
edge1 = [ x-y for x,y in zip(pt_list[(pt+1)%3],pt_list[pt])]
edge2 = [ x-y for x,y in zip(pt_list[(pt+2)%3],pt_list[pt])]
vtk.vtkMath.Cross(edge1, edge2, cross_edges)
vtk.vtkMath.Normalize(cross_edges)
vtk.vtkMath.Cross(edge1, direction, cross_test)
vtk.vtkMath.Normalize(cross_test)
is_winding = vtk.vtkMath.Dot(cross_edges, cross_test)
# switch the winding of the intersection points
if(is_winding < 0.0):
tmp = pt1
pt1 = pt2
pt2 = tmp
if ( tag == "3_tris"):
if(first_3tris == False):
first_3tris = True
# first triangle
#new_cell = vtk.vtkTriangle()
#pts_id_list = []
#pt_id_1 = triangle_pts.InsertNextPoint(pt_list[pt])
#new_cell.GetPointIds().SetId(0, pt_id_1)
#pt_id_2 = triangle_pts.InsertNextPoint(pt1)
#new_cell.GetPointIds().SetId(1, pt_id_2)
#pt_id_3 = triangle_pts.InsertNextPoint(pt2)
#new_cell.GetPointIds().SetId(2, pt_id_3)
#triangles.InsertNextCell(new_cell)
triangle_pts = vtk.vtkPoints()
quad_id_1 = triangle_pts.InsertNextPoint(pt2)
quad_id_2 = triangle_pts.InsertNextPoint(pt1)
quad_id_3 = triangle_pts.InsertNextPoint(pt_list[(pt+1)%3])
quad_id_4 = triangle_pts.InsertNextPoint(pt_list[(pt+2)%3])
pts_new_triangle = []
pt_id_2 = surface_reference.GetPoints().InsertNextPoint(pt1)
pts_new_triangle.append(surface_reference.GetCell(cell_id).GetPointIds().GetId(pt))
pts_new_triangle.append(pt_id_2)
surface_reference.GetPointData().GetArray("Ids").InsertNextTuple([pt_id_2])
pt_id_2_old = surface_reference.GetCell(cell_id).GetPointIds().GetId((pt+1)%3)
#surface_reference.GetCell(cell_id).GetPointIds().SetId((pt+1)%3, pt_id_2)
pt_id_3 = surface_reference.GetPoints().InsertNextPoint(pt2)
pts_new_triangle.append(pt_id_3)
                        surface_reference.GetPointData().GetArray("Ids").InsertNextTuple([pt_id_3])
pt_id_3_old = surface_reference.GetCell(cell_id).GetPointIds().GetId((pt+2)%3)
#surface_reference.GetCell(cell_id).GetPointIds().SetId((pt+2)%3, pt_id_3)
surface_reference.ReplaceCell(cell_id, len(pts_new_triangle), pts_new_triangle)
# map polygon to reference mesh
map_to = {quad_id_1 : pt_id_3, quad_id_2 : pt_id_2, quad_id_3 : pt_id_2_old, quad_id_4 : pt_id_3_old}
npts = 4
polygon = vtk.vtkPolygon()
polygon.GetPointIds().SetNumberOfIds(npts)
polygon.GetPoints().SetNumberOfPoints(npts)
polygon.GetPointIds().SetId(0, quad_id_1)
polygon.GetPoints().SetPoint(0, triangle_pts.GetPoint(quad_id_1))
polygon.GetPointIds().SetId(1, quad_id_2)
polygon.GetPoints().SetPoint(1, triangle_pts.GetPoint(quad_id_2))
polygon.GetPointIds().SetId(2, quad_id_3)
polygon.GetPoints().SetPoint(2, triangle_pts.GetPoint(quad_id_3))
polygon.GetPointIds().SetId(3, quad_id_4)
polygon.GetPoints().SetPoint(3, triangle_pts.GetPoint(quad_id_4))
quad_ids = vtk.vtkIdList()
polygon.Triangulate(quad_ids)
numPts = quad_ids.GetNumberOfIds()
numSimplices = numPts // 3
triPts = [0,0,0]
triPts_map = [0,0,0]
#print(numSimplices, numPts
for j in range(numSimplices):
for k in range(3):
triPts[k] = polygon.GetPointIds().GetId(quad_ids.GetId(int(3*j+k)))
triPts_map[k] = map_to[triPts[k]]
#triangles.InsertNextCell(3, triPts)
cell_id_new = surface_reference.GetPolys().InsertNextCell(3,triPts_map)
surface_reference.GetCellData().GetArray("Ids").InsertNextTuple([cell_id_new])
#surface_reference.Modified()
#print("hello")
#if ( tag == "2_tris"):
## doesnt' work well
## for collapse of intersection line on
#new_cell = vtk.vtkTriangle()
#pts_id_list = []
#pt_id_1 = triangle_pts.InsertNextPoint(pt_list[pt])
#new_cell.GetPointIds().SetId(0, pt_id_1)
#pt_id_2 = triangle_pts.InsertNextPoint(pt_list[(pt+1)%3])
#new_cell.GetPointIds().SetId(1, pt_id_2)
#pt_id_3 = triangle_pts.InsertNextPoint(pt2)
#new_cell.GetPointIds().SetId(2, pt_id_3)
#triangles.InsertNextCell(new_cell)
#new_cell = vtk.vtkTriangle()
#new_cell.GetPointIds().SetId(0, pt_id_1)
#new_cell.GetPointIds().SetId(1, pt_id_3)
#pt_id_4 = triangle_pts.InsertNextPoint(pt_list[(pt+2)%3])
#new_cell.GetPointIds().SetId(2, pt_id_4)
#triangles.InsertNextCell(new_cell)
#triangle_pd.SetPoints(triangle_pts)
#triangle_pd.SetPolys(triangles)
#pass_ = vtk.vtkPassArrays()
#pass_.SetInputData(surface_reference)
#pass_.RemoveArraysOn()
#pass_.RemoveCellDataArray("Ids")
#pass_.RemoveCellDataArray("Normals")
#pass_.RemovePointDataArray("Ids")
#pass_.RemovePointDataArray("Normals")
##pass_.ClearPointDataArrays()
##pass_.ClearCellDataArrays()
#pass_.Update()
#geom = vtk.vtkGeometryFilter()
#geom.SetInputConnection(pass_.GetOutputPort())
#geom.Update()
#normalsFilter2 = vmtkscripts.vmtkSurfaceNormals()
#normalsFilter2.ComputeCellNormals = 1
#normalsFilter2.Surface = surface_reference
#normalsFilter2.NormalsArrayName = 'Normals'
#normalsFilter2.Execute()
#writer = vmtkscripts.vmtkSurfaceWriter()
#writer.OutputFileName = "test_file_{0}.vtp".format(pd_count)
#writer.Input = surface_reference #geom.GetOutput() #triangle_pd #extract.GetOutput()
#writer.Execute()
pd_count += 1
#print("yp")
surface_reference.Modified()
#print("zzz")
surface_reference.BuildCells()
surface_reference.BuildLinks()
#print("yppp")
locator_surf = vtk.vtkPointLocator()
locator_surf.SetDataSet(surface_reference)
locator_surf.BuildLocator()
#print("ydddp")
locator_cell = vtk.vtkCellLocator()
locator_cell.SetDataSet(surface_reference)
locator_cell.BuildLocator()
#return
print( bifurcation_info)
#return
normalsFilter2 = vmtkscripts.vmtkSurfaceNormals()
normalsFilter2.ComputeCellNormals = 1
normalsFilter2.Surface = surface_reference
normalsFilter2.NormalsArrayName = 'Normals'
normalsFilter2.Execute()
writer = vmtkscripts.vmtkSurfaceWriter()
writer.OutputFileName = args.out_file
writer.Input = normalsFilter2.Surface
writer.Execute()
#length = 0.0
#start_pt_idx = 0
#for j in range(n_cell_pts):
#centerlines.GetPointCells(cell.GetPointIds().GetId(j), cell_Ids)
#n_pt_neighbors = cell_Ids.GetNumberOfIds()
#pt_id = cell.GetPointIds().GetId(j)
#pts_ids.InsertNextId(pt_id)
#point = centerlines.GetPoint(cell.GetPointIds().GetId(j))
#length += vtk.vtkMath.Distance2BetweenPoints(prevPoint,point)**0.5
#prevPoint = point
#if((j > start_pt_idx and n_pt_neighbors > 1) or (j == n_cell_pts-1)):
##close
#new_polyline = addPolyLine(pts_ids)
## weird issue with duplicate points if they are not removed
#if(length > 0.0):
#outputLines.InsertNextCell(new_polyline)
#lengthArray.InsertNextTuple([length])
#start_pt_idx = j
#if(n_pt_neighbors > 1):
#pts_ids.Reset()
#pts_ids.InsertNextId(pt_id)
#length = 0.0
#pts_ids.Reset()
#output.SetPoints(centerlines.GetPoints())
#output.SetLines(outputLines)
#output.GetCellData().AddArray(lengthArray)
#for i in range(centerlines.GetPointData().GetNumberOfArrays()):
#output.GetPointData().AddArray(centerlines.GetPointData().GetArray(i))
#writer = vmtkscripts.vmtkSurfaceWriter()
#writer.OutputFileName = args.out_file
#if(args.clean_ctr):
#cleaner2 = vtk.vtkCleanPolyData()
#cleaner2.PointMergingOn()
#cleaner2.SetInputData(output)
#cleaner2.Update()
#writer.Input = cleaner2.GetOutput()
#else:
#writer.Input = output
#writer.Execute()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='estimate vertices for uniform point distribution')
parser.add_argument("-i", dest="surface", required=True, help="input surface file", metavar="FILE")
parser.add_argument("-c", dest="centerlines", required=True, help="centerlines", metavar="FILE")
parser.add_argument("--clean", dest="clean_ctr", action='store_true', help=" clean centerlines after")
parser.add_argument("-o", dest="out_file", required=True, help="output filename for labeled surface mesh", metavar="FILE")
parser.add_argument("-s", dest="out_segments", required=True, help="output filename for evaluated centerlines and surface mesh (slices)", metavar="FILE")
args = parser.parse_args()
#print(args)
Execute(args)
|
|
'''
module: utils.py
use: contains functions associated with general functionality that are not unique to any particular part of the project
'''
import numpy as np
#from scipy.stats import mode #this isn't actually used, I think
def getDifferenceArray(vector):
'''
Purpose:
        Takes an m by n vector and returns a symmetric array with elements representing the difference between components in the vector
        array[i,j] = ||vector[i,:] - vector[j,:]||
Inputs:
vector - m by n ndarray type representing a set of joint positions, for example
Outputs:
        array - m by m ndarray with the (i,j)th element equal to the Euclidean distance between the ith and jth rows of vector
'''
vec_len = len(vector)
array = np.zeros((vec_len,vec_len))
for i in range(0,vec_len):
for j in range(i,vec_len):
array[i,j] = np.linalg.norm((vector[i,:]-vector[j,:]))
array = symmetrize(array)
return array
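def _getDifferenceArray_example():
    '''
    Hedged example (added for illustration, not part of the original module):
    three collinear 2-d points spaced one unit apart give pairwise distances
    of 0, 1 and 2 in the symmetric difference array.
    '''
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
    D = getDifferenceArray(pts)
    assert np.allclose(D, [[0.0, 1.0, 2.0], [1.0, 0.0, 1.0], [2.0, 1.0, 0.0]])
    return D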
def getSimilarityArray(feature_array,similarity_method = 'exp',k_nn = 5):
'''
Purpose:
Computes the similarity array for a given feature set, similarity method, and k_nearest_neighbors value
Part of the spectral clustering process
Inputs:
feature_array - set of features
similarity_method - method to use for computing the similarity array:
            --'exp' computes W[i,j] = exp(-||xi - xj||^2)
--'norm' computes W[i,j] = ||xi - xj||^2
--'chain' is specifically for the 'chain' generateData type
k_nn - number of nearest neighbors to consider (k_nn=5 means only the top 5 largest similarity values are kept nonzero)
Outputs:
sim_array - symmetric array of similarity strength values
'''
allowed_methods = ['exp','norm','chain']
if similarity_method not in allowed_methods:
print 'ERROR: Not a valid similarity_method'
return
else:
sim_array = np.zeros((len(feature_array),len(feature_array)))
i = 0
j = 0
for rowi in feature_array:
for rowj in feature_array:
if i <= j:
difference = (rowi-rowj).T
if similarity_method == 'exp':
sim_array[i,j] = np.exp(-1*((difference.T).dot(difference)))
elif similarity_method == 'norm':
sim_array[i,j] = difference.T.dot(difference)
elif similarity_method == 'chain':
if np.linalg.norm(difference) <= 1.5:
if ((i != int(len(feature_array)/2.)-1) and (j != int(len(feature_array)/2.))):
sim_array[i,j] = 1
if i == j:
sim_array[i,j] = 1
j += 1
i += 1
j = 0
sim_array = sim_array - np.diag(sim_array.diagonal()) #remove diagonal nonzero values
if k_nn != -1:
for rowi in sim_array:
ind = np.argpartition(rowi, -1*k_nn)[(-1*k_nn):]
for i in range(len(rowi)):
if i not in ind:
rowi[i] = 0;
return symmetrize(sim_array)
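def _getSimilarityArray_example():
    '''
    Hedged example (added for illustration, not part of the original module):
    two tight clusters produce a block-structured similarity array under the
    'exp' method - within-cluster entries stay near 1, cross-cluster near 0.
    '''
    pts = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    W = getSimilarityArray(pts, similarity_method='exp', k_nn=2)
    return W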
def symmetrize(array):
'''
Purpose:
Returns the symmetric version of an upper or lower triangular array
Inputs:
array - upper OR lower triangular ndarray
Outputs:
symmetric version of array
'''
return array + array.T - np.diag(array.diagonal())
def normalize(array,normalizer):
'''
Purpose:
Normalize an array by some 'normalizer' value
Inputs:
array - an ndarray type
normalizer - non int-type value
Outputs:
array - output of (array/normalizer)
'''
array = (1.0/normalizer)*array
return array
def runningAvg(vector,N):
'''
Purpose:
Performs a runningAvg calculation on a 1d array 'vector' and averages over N spaces
Inputs:
vector - ndarray 1-dimensional array
N - number of elements to average over
Outputs:
vector with each element being the runningAvg over N elements - same size as original vector
'''
return np.convolve(vector, np.ones(N,)/(N*1.0))[(N-1):]
def numOutsideBounds(_input,bounds):
'''
Purpose:
given an input vector of length n and bounds = [lower,upper] each of length n (for each element in the input vector), return the number of elements of the input that are not within the lower and upper bounds
Inputs:
_input - n-length ndarray
bounds - list of [lower,upper] where lower and upper are each n-length ndarray objects representing the lower and upper bounds that the input should satisfy
Outputs:
num_outside_bounds - integer number of elements of the _input that fell outside of the bounds
'''
num_below_lower_bound = np.sum(_input<bounds[0])
num_above_upper_bound = np.sum(_input>bounds[1])
num_outside_bounds = num_below_lower_bound+num_above_upper_bound
return num_outside_bounds
def getBackwardsUniqueOrder(iterable,backward=True):
'''
Purpose:
Returns the unique 'most recently seen' order of iterables. For example if the iterable is [0,0,1,3,2,0,1,2,2], this function will return [2,1,0,3].
Inputs:
iterable - list or 1D-ndarray with potentially repeated values
        backward - if set to True (the default), the unique order is determined starting from the end of the iterable (index -1); if False, it starts from index 0
Outputs:
reverse - list object
'''
if backward:
_iterable = iterable[::-1]
else:
_iterable = iterable
reverse = [y for ind,y in enumerate(_iterable) if y not in _iterable[0:ind]]
return reverse
def softmax(x,alpha=-1.0,rescale_=False):
if rescale_:
x_ = rescale(x)
else:
x_ = x
expx = np.exp(alpha*np.array(x_)) #take exponential of the -x(i) values in x
total = np.sum(expx) #for use in the denominator
return expx/float(total)
def rescale(x,max_=10):
    x_scaled = [k/400.0*float(max_) for k in x]  # float division so integer inputs rescale correctly
return x_scaled
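def _softmax_example():
    '''
    Hedged example (added for illustration, not part of the original module):
    with the default alpha = -1.0 the smallest input receives the largest
    weight, and the weights sum to 1.
    '''
    weights = softmax([1.0, 2.0, 3.0])
    assert np.isclose(np.sum(weights), 1.0)
    assert weights[0] > weights[1] > weights[2]
    return weights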
def gaussiansMeet(mu1, std1, mu2, std2):
'''
Purpose:
Calculates the intersection points of two gaussian distributions
Inputs:
mu1, mu2 - mean values of the respective guassian distributions
std1, std2 - standard deviation values of the respective gaussian distributions
Outputs:
roots - all real values of intersection points
'''
#print 'mu stuff: ', mu1, std1, mu2,std2
a = 1/(2.*std1**2) - 1/(2.*std2**2)
b = mu2/(1.*std2**2) - mu1/(1.*std1**2)
c = mu1**2 /(2.*std1**2) - mu2**2 / (2.*std2**2) - np.log(std2/(1.*std1))
#print a,b,c
return np.roots([a,b,c])
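def _gaussiansMeet_example():
    '''
    Hedged worked example (added for illustration, not part of the original
    module): two unit-variance gaussians centered at 0 and 2 give a = 0,
    b = 2, c = -2, so the single intersection point is x = 1, halfway
    between the means.
    '''
    roots = gaussiansMeet(0.0, 1.0, 2.0, 1.0)
    assert np.allclose(roots, [1.0])
    return roots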
class Subspace(object):
'''
Purpose:
        Subspace class that allows for easy projections into the subspace. It lets a new set of points be projected onto the subspace even when the number of points/features differs from the base subspace, which is useful whenever the two sets share a structure that can be exploited.
Functions:
        self.projectOnMe(self,X) - projects a differently shaped matrix X (m by r) onto the same space as the base subspace self.U (n by p), provided they share a similar structure but do not have the same number of points.
callables:
self.U - n by p basis matrix for the subspace
self.n - number of elements in the subspace
self.p - number of features in the subspace
'''
def __init__(self,U):
'''
Initialize the subspace class with the basis array U (n by p)
'''
self.U = U #U is orthogonal subspace ndarray object n by p
self.n = U.shape[0] #number of elements in the subspace
self.p = U.shape[1] #number of features in subspace
def projectOnMe(self,X,onlyshape=False):
'''
Purpose:
Adds or subtracts random points from the matrix X to coincide with the same number of points as self.n. This function uses interpolation between points randomly chosen to add new points to coincide with the dimension of the basis array self.U.
Inputs:
X - m by r array with m and r possibly different from self.n and self.p
Outputs:
Z - m by self.p array in the proper subspace self.U
'''
        #project a different subspace Y (m by r, m and r possibly not equal to n and p) onto the space spanned by self.U
def extendX(X):
'''
Purpose:
Adds the necessary number of points to X to match self.n
Inputs:
X - m by r array with m and r possibly different from self.n and self.p
Outputs:
X - an updated version of X that is now self.n by r
inds - array of indices that were added to X to be removed later
'''
#inds = np.array([added_ind1, added_ind2, added_ind3, ...]) int between {1,2,...,max_ind-1}
#X is too small to be projected on U, so need to add additional points
if len(X) > self.n: #check you didn't use the wrong function (should be done for you already though )
#print 'whoops, extendX() is not for you'
return
else:
num_add = self.n-len(X)
#print 'adding ', num_add, ' elements'
                interps = np.random.randint(len(X)-1, size=num_add) #select interpolation indices at the halfway points along the elements of the basis {0.5,1.5,2.5...,max_ind-0.5}
interps = interps.astype('float64')
interps += 0.5
interps = np.sort(interps)
ceil_interps = np.ceil(interps)
Xnew = np.ones((len(X)+num_add,1))
for col in X.T: #for each column of X, interpolate
value_add = np.interp(interps,np.arange(len(col)),col)
col = np.insert(col,ceil_interps,value_add)
col99 = np.insert(col,ceil_interps,np.ones(len(ceil_interps))*-99) #fills in added entries with -99
inds = np.where(col99==-99) #inds which will be removed later
Xnew = np.hstack((Xnew,col.reshape(len(col),1)))
X = Xnew[:,1:] #ignore first column
return X, inds
def contractX(X):
'''
Purpose:
Removes the necessary number of points to X to match self.n
Inputs:
X - m by r array with m and r possibly different from self.n and self.p
Outputs:
X - an updated version of X that is now self.n by r
                inds - array of indices that were removed from X, to be added back in later through interpolation in the new basis
'''
#X is too large to be projected on U, so need to remove points
#inds = np.array([added_ind1, added_ind2, added_ind3, ...])
if len(X) < self.n:
#print 'whoops, contractX() is not for you'
return
else:
num_remove = len(X) - self.n
#print 'removing ', num_remove, ' elements'
removes = np.random.choice(len(X)-2,size=num_remove,replace=False)+1 #select from {1,2,...max_ind-1} without replacement
removes = np.sort(removes)
inds = np.empty_like(removes)
for i,r in enumerate(removes):
inds[i] = r-1-i #index after which to place the new element when adding them back for interpolation
Xnew = np.ones((len(X)-num_remove,1))
for col in X.T:
col = np.delete(col,removes,axis=0)
Xnew = np.hstack((Xnew,col.reshape(len(col),1)))
X = Xnew[:,1:]
return X, inds
def resolveProjection(Z,inds,status):
'''
Purpose:
                Resolves the projection process after the newly shaped array has been projected on the new subspace by replacing the proper indices or removing the added indices placed in inds.
Inputs:
Z - self.n by self.p array coming from utils.projectToSubspace()
                inds - indices of removed or added points in order to shape the projected subspace into the self.U basis.
                status - (0 = no changes necessary),(+1 = need to remove the unnecessary points that had been added previously),(-1 = need to add in points through interpolation at the appropriate indices)
Outputs:
Z - m by self.p array in the proper subspace self.U
'''
if status == 0:
#print 'status is go'
return Z
elif status == +1:
#print 'removing uncessary dumb additions'
#remove unnecessary added rows from Z
Z = np.delete(Z,inds,axis=0)
return Z
elif status == -1:
#add necessary removed points to Z
#print 'adding the important addtions back'
interps = inds + 0.5
Znew = np.ones((len(Z)+len(inds),1))
for col in Z.T:
values = np.interp(inds+0.5, np.arange(len(col)), col)
col = np.insert(col,np.ceil(interps),values)
Znew = np.hstack((Znew,col.reshape(len(col),1)))
Z = Znew[:,1:]
return Z
status = 0 #default that self.U and X are the same length
inds = []
if len(X) < self.n:
#print 'extending'
X,inds = extendX(X)
status = +1 #indices have been added, will need to remove these from the projection later
elif len(X) > self.n:
#print 'contracting'
X,inds = contractX(X)
status = -1 #indices have been removed, will need to interpolate in projection later
if onlyshape:
return X
else:
Z = projectToSubspace(X,self.U)
Z = resolveProjection(Z,inds,status)
return Z
def projectToSubspace(X,Y):
'''
Purpose:
Embeds a set of features X (in R^(n by k)) onto a reduced dimension subspace Y (in R^(n by r)), r < k, via least squares approximation, Z = Xw where w = inv(X'X)X'Y
Inputs:
X - n by k feature array (ndarray type)
Y - n by r feature array, (r<k, ndarray type)
Outputs:
Z - n by r ndarray subspace projection of X onto Y
'''
w = np.linalg.lstsq(X,Y)
Z = X.dot(w[0])
return Z
def orderStates(vector):
'''
Purpose:
Orders states so that first defined state is a 0, second defined state is a 1, etc
Inputs:
vector - 1 dimensional array of a relatively small number of ints
Outputs:
ordered_vector - vector of same size as original vector but with the first few states ordered
'''
order_hold = []
for ind,elt in enumerate(vector):
if ind == 0:
order_hold.append(elt)
ordered_vector = [0]
else:
if elt not in order_hold:
order_hold.append(elt)
ordered_vector.append(order_hold.index(elt))
return ordered_vector
def generateData(N,form='bull',dim=2):
'''
Purpose:
Generates (N by dim) ndarray of a type described by 'form'
Particularly useful for testing clustering methods
Inputs:
N - length of data set
dim - number of dimensions in dataset (ie dim = 2)
form - data set type
--'sep' compiles a dataset with two distinct groups
--'bull' compiles a dataset of a bullseye shape (one labeled group within a ring of the other group)
--'chain' compiles a dataset of a linear chain with a label break in between them
Outputs:
X - compiled data array of 'form' type
y - labels associated with each of the N examples of X
'''
X = np.zeros((N,dim),dtype = np.float16)
y = np.zeros((N,1), dtype = np.int_)
    if form == 'sep': #separate clusters of data
base1 = np.ones((1,dim))
base2 = np.zeros((1,dim))
cnt = 0
while cnt < np.floor(N/2):
X[cnt,:] = base1 + 0.5*(np.random.rand(1,dim)*2.0-1.)
y[cnt] = 1
cnt += 1
while cnt < N:
X[cnt,:] = base2 + 0.5*(np.random.rand(1,dim)*2.0-1.)
y[cnt] = -1
cnt += 1
y.shape = (N,)
return X,y
elif form == 'bull': #inner cluster surrounded by ring of points
cnt=0;
X = np.zeros((N,dim),dtype = np.float16)
y = np.zeros((N,1), dtype = np.int_)
totalg1 = 0
totalg2 = 0
while cnt < N :
x = 2*np.random.rand(1,dim)-1;
if np.linalg.norm(x) < 0.15 and totalg1<=(N-np.floor(N/1.2)):
X[cnt,:] = x;
y[cnt] = +1
cnt=cnt+1;
totalg1 +=1
elif (np.linalg.norm(x) > 0.5 and np.linalg.norm(x) < 0.55) and totalg2<(N-(N-np.floor(N/1.2))):
X[cnt,:] = x;
y[cnt] = -1
cnt=cnt+1;
totalg2 += 1
y.shape = (N,)
return X,y
elif form == 'chain': #linear chain graph of N points
X = np.zeros((N,dim),dtype = np.float16)
for i in np.arange(N):
X[i,:] = i
if i < N/2.:
y[i] = +1
else:
y[i] = -1
y.shape = (N,)
return X,y
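def _generateData_example():
    '''
    Hedged sketch (added for illustration, not part of the original module):
    draws a small 'bull' dataset and builds its similarity array, which is
    the usual first step before spectral clustering.
    '''
    X, y = generateData(50, form='bull', dim=2)
    W = getSimilarityArray(X, similarity_method='exp', k_nn=5)
    return X, y, W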
def loader(handover_starts,data_object,n):
'''
Purpose:
utility function to get a list with the appropriate start and end frame numbers for a certain data_object and handover_starts. Returns starts = [init_frame,end_frame]
Inputs:
handover_starts - list of the frame numbers for all starts of handovers or general tasks
data_object - kinectData object that has been filled with data from a file
n - the handover number you would like to get the begining and end frames of
Outputs:
starts - list of [init_frame for handover n, end_frame for handover n]
'''
try:
starts = [handover_starts[n],handover_starts[n+1]]
except IndexError:
starts = [handover_starts[n],data_object.num_vectors-1]
return starts
def runTasks(handover_starts,data_obj,task_obj,n,max_ind=10):
'''
Purpose:
Performs the task class update step for n randomly chosen tasks from the potential dataset of tasks in handover_starts. In other words, for n = 5, 5 different values from handover_starts will be chosen to be used for the task update on the task_obj using data found in data_obj. max_ind represents the total number of handover options in handover_starts to choose from.
Inputs:
handover_starts - list of the frame numbers for all starts of handovers or general tasks
data_obj - kinectData object
task_obj - process.Task() object
n - number of handovers/full tasks to randomly choose
max_ind - total number of handovers available to be chosen from
Outputs:
no return object, but task_obj is updated with the new state values and historical data
'''
inds = np.random.randint(max_ind,size=n)
for i in inds:
task_obj.update(data_obj,loader(handover_starts,data_obj,i))
def euclideanDist(point1,point2):
'''
Purpose:
Calculates euclidean distance between two points
Inputs:
point1,point2 - same dimensioned points in some space
Outputs:
output - euclidean distance between the two points, ||point1-point2||_2
'''
return np.linalg.norm(point1-point2)
def majorityVote(values):
'''
Purpose:
        Returns the most frequently occurring value in a 1d list/array, along with the unique values and their counts sorted by frequency
Inputs:
values - 1d list/array of values that include redundant values
Outputs: Two outputs - output1,output2
        output1 - the most frequently occurring value found in values
        output2 - two-element list = [unique values sorted from least to most frequent, counts corresponding to those unique values]
'''
    '''Test code (run on its own as a __main__ script):
x1 = [2]*5+[1]*10+[0]*3 # expected list return- [0,2,1], [3,5,10]
x2 = [1]*5+[2]*10+[0]*3 # [0,1,2], [3,5,10]
x3 = [0]*5+[1]*10+[2]*3 # [2,0,1], [3,5,10]
x4 = [0]*5+[2]*10+[1]*3 # [1,0,2], [3,5,10]
x5 = [2]*5+[0]*10+[1]*3 # [1,2,0], [3,5,10]
x6 = [2]*10+[1]*5 # [1,2], [5,10]
def dothings(x):
        print(x)
        best_val, obj = majorityVote(x)
        print('most often: ', best_val)
        print('sorted indices: ', obj[0])
        print('sorted count values for indices: ', obj[1])
dothings(x1)
dothings(x2)
dothings(x3)
dothings(x4)
dothings(x5)
dothings(x6)
'''
#print 'np.unique(values): ', np.unique(values), values
if isinstance(values,list):
uValues = np.unique(values).tolist()
uCounts = [np.sum(np.array(values) == uv) for uv in uValues]
sorted_inds = np.argsort(uCounts)
best_val = uValues[sorted_inds[-1]]
sorted_vals = [int(uValues[x]) for x in sorted_inds]
sorted_cnts = np.sort(uCounts)
else:
best_val = values
sorted_vals = values
sorted_cnts = len(values)
return best_val, [sorted_vals, sorted_cnts]
def kNN(new_point, history_points, history_labels, k=5):
'''
Purpose:
        Performs the k-nearest-neighbors algorithm using Euclidean distances. Requires the new point, the previously labeled points and their labels, and the number of neighbors k used to choose the new point's label.
Inputs:
new_point - 1 by p array representing the new p-featured point in space
history_points - n by p array representing the known labeled points in space
        history_labels - length n list of labels corresponding to the n history_points examples
k - nearest neighbors to consider for choosing new point label. The majority vote label from the k closest points to the new point will be output as the new label.
Outputs: Two outputs in a single list object - [vote,counts_info]
vote - majority vote label from the k closest points to the new point
        counts_info - two-element list [sorted_inds, counts]; sorted_inds: unique labels among the k closest points, sorted from fewest to most examples; counts: counts of each unique label (same order as sorted_inds), summing to k
'''
distances = []
for old_point in history_points:
distances.append(euclideanDist(new_point,old_point))
sorted_inds = np.argsort(distances)
consider_labels = np.array(history_labels)[sorted_inds[0:k]].tolist()
vote, counts_info = majorityVote(consider_labels)
return [vote,counts_info]
def compareTaskDef(task_obj,new_labels,kinectData_obj):
import process
new_path = task_obj.definePath(new_labels)
    dummy_task = process.Task(kinectData_obj) # create a dummy task object used to print out the task definition
dummy_task.path = new_path[0]
dummy_task.times = new_path[1]
    print('Expected path (', sum(task_obj.times), 'frames ):')
dummy_var = task_obj.printTaskDef(1)
    print('New path (', sum(dummy_task.times), 'frames ):')
    new_path_info = dummy_task.printTaskDef(sum(dummy_task.times)/float(sum(task_obj.times))) # prints the new path information, scaled by the frame-count ratio
return
def plotFeaturesTogether(data_obj,col,starts,tasknums):
import matplotlib.pyplot as plt
colors = 'kbgrmy'
for i,t in enumerate(tasknums):
a,b = starts[t],starts[t+1]
        print(a, b)
        print(colors[i])
plt.plot(np.arange(b-a),data_obj.feat_array[a:b,col],'-',color=colors[i],label='task'+str(t))
plt.legend()
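# Minimal usage sketch (illustrative, not part of the original module): generate a
# synthetic bullseye dataset with generateData(), then label a new point near the
# center with kNN() and inspect the vote counts it forwards from majorityVote().
if __name__ == '__main__':
    X_demo, y_demo = generateData(200, form='bull', dim=2)
    new_point = np.zeros((1, 2))  # a point at the center of the bullseye
    vote, counts_info = kNN(new_point, X_demo, y_demo.tolist(), k=7)
    print('predicted label:', vote)
    print('labels sorted by frequency:', counts_info[0])
    print('corresponding counts:', counts_info[1])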
|
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.logging import log_metric_pb2
from google3.cloud.graphite.mmv2.services.google.logging import log_metric_pb2_grpc
from typing import List
class LogMetric(object):
def __init__(
self,
name: str = None,
description: str = None,
filter: str = None,
disabled: bool = None,
metric_descriptor: dict = None,
value_extractor: str = None,
label_extractors: dict = None,
bucket_options: dict = None,
create_time: str = None,
update_time: str = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.description = description
self.filter = filter
self.disabled = disabled
self.metric_descriptor = metric_descriptor
self.value_extractor = value_extractor
self.label_extractors = label_extractors
self.bucket_options = bucket_options
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = log_metric_pb2_grpc.LoggingLogMetricServiceStub(channel.Channel())
request = log_metric_pb2.ApplyLoggingLogMetricRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.filter):
request.resource.filter = Primitive.to_proto(self.filter)
if Primitive.to_proto(self.disabled):
request.resource.disabled = Primitive.to_proto(self.disabled)
if LogMetricMetricDescriptor.to_proto(self.metric_descriptor):
request.resource.metric_descriptor.CopyFrom(
LogMetricMetricDescriptor.to_proto(self.metric_descriptor)
)
else:
request.resource.ClearField("metric_descriptor")
if Primitive.to_proto(self.value_extractor):
request.resource.value_extractor = Primitive.to_proto(self.value_extractor)
if Primitive.to_proto(self.label_extractors):
request.resource.label_extractors = Primitive.to_proto(
self.label_extractors
)
if LogMetricBucketOptions.to_proto(self.bucket_options):
request.resource.bucket_options.CopyFrom(
LogMetricBucketOptions.to_proto(self.bucket_options)
)
else:
request.resource.ClearField("bucket_options")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyLoggingLogMetric(request)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.filter = Primitive.from_proto(response.filter)
self.disabled = Primitive.from_proto(response.disabled)
self.metric_descriptor = LogMetricMetricDescriptor.from_proto(
response.metric_descriptor
)
self.value_extractor = Primitive.from_proto(response.value_extractor)
self.label_extractors = Primitive.from_proto(response.label_extractors)
self.bucket_options = LogMetricBucketOptions.from_proto(response.bucket_options)
self.create_time = Primitive.from_proto(response.create_time)
self.update_time = Primitive.from_proto(response.update_time)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = log_metric_pb2_grpc.LoggingLogMetricServiceStub(channel.Channel())
request = log_metric_pb2.DeleteLoggingLogMetricRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.filter):
request.resource.filter = Primitive.to_proto(self.filter)
if Primitive.to_proto(self.disabled):
request.resource.disabled = Primitive.to_proto(self.disabled)
if LogMetricMetricDescriptor.to_proto(self.metric_descriptor):
request.resource.metric_descriptor.CopyFrom(
LogMetricMetricDescriptor.to_proto(self.metric_descriptor)
)
else:
request.resource.ClearField("metric_descriptor")
if Primitive.to_proto(self.value_extractor):
request.resource.value_extractor = Primitive.to_proto(self.value_extractor)
if Primitive.to_proto(self.label_extractors):
request.resource.label_extractors = Primitive.to_proto(
self.label_extractors
)
if LogMetricBucketOptions.to_proto(self.bucket_options):
request.resource.bucket_options.CopyFrom(
LogMetricBucketOptions.to_proto(self.bucket_options)
)
else:
request.resource.ClearField("bucket_options")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteLoggingLogMetric(request)
@classmethod
def list(self, project, service_account_file=""):
stub = log_metric_pb2_grpc.LoggingLogMetricServiceStub(channel.Channel())
request = log_metric_pb2.ListLoggingLogMetricRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListLoggingLogMetric(request).items
def to_proto(self):
resource = log_metric_pb2.LoggingLogMetric()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.filter):
resource.filter = Primitive.to_proto(self.filter)
if Primitive.to_proto(self.disabled):
resource.disabled = Primitive.to_proto(self.disabled)
if LogMetricMetricDescriptor.to_proto(self.metric_descriptor):
resource.metric_descriptor.CopyFrom(
LogMetricMetricDescriptor.to_proto(self.metric_descriptor)
)
else:
resource.ClearField("metric_descriptor")
if Primitive.to_proto(self.value_extractor):
resource.value_extractor = Primitive.to_proto(self.value_extractor)
if Primitive.to_proto(self.label_extractors):
resource.label_extractors = Primitive.to_proto(self.label_extractors)
if LogMetricBucketOptions.to_proto(self.bucket_options):
resource.bucket_options.CopyFrom(
LogMetricBucketOptions.to_proto(self.bucket_options)
)
else:
resource.ClearField("bucket_options")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class LogMetricMetricDescriptor(object):
def __init__(
self,
name: str = None,
type: str = None,
labels: list = None,
metric_kind: str = None,
value_type: str = None,
unit: str = None,
description: str = None,
display_name: str = None,
metadata: dict = None,
launch_stage: str = None,
monitored_resource_types: list = None,
):
self.name = name
self.type = type
self.labels = labels
self.metric_kind = metric_kind
self.value_type = value_type
self.unit = unit
self.description = description
self.display_name = display_name
self.metadata = metadata
self.launch_stage = launch_stage
self.monitored_resource_types = monitored_resource_types
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricMetricDescriptor()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.type):
res.type = Primitive.to_proto(resource.type)
if LogMetricMetricDescriptorLabelsArray.to_proto(resource.labels):
res.labels.extend(
LogMetricMetricDescriptorLabelsArray.to_proto(resource.labels)
)
if LogMetricMetricDescriptorMetricKindEnum.to_proto(resource.metric_kind):
res.metric_kind = LogMetricMetricDescriptorMetricKindEnum.to_proto(
resource.metric_kind
)
if LogMetricMetricDescriptorValueTypeEnum.to_proto(resource.value_type):
res.value_type = LogMetricMetricDescriptorValueTypeEnum.to_proto(
resource.value_type
)
if Primitive.to_proto(resource.unit):
res.unit = Primitive.to_proto(resource.unit)
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
if Primitive.to_proto(resource.display_name):
res.display_name = Primitive.to_proto(resource.display_name)
if LogMetricMetricDescriptorMetadata.to_proto(resource.metadata):
res.metadata.CopyFrom(
LogMetricMetricDescriptorMetadata.to_proto(resource.metadata)
)
else:
res.ClearField("metadata")
if LogMetricMetricDescriptorLaunchStageEnum.to_proto(resource.launch_stage):
res.launch_stage = LogMetricMetricDescriptorLaunchStageEnum.to_proto(
resource.launch_stage
)
if Primitive.to_proto(resource.monitored_resource_types):
res.monitored_resource_types.extend(
Primitive.to_proto(resource.monitored_resource_types)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricMetricDescriptor(
name=Primitive.from_proto(resource.name),
type=Primitive.from_proto(resource.type),
labels=LogMetricMetricDescriptorLabelsArray.from_proto(resource.labels),
metric_kind=LogMetricMetricDescriptorMetricKindEnum.from_proto(
resource.metric_kind
),
value_type=LogMetricMetricDescriptorValueTypeEnum.from_proto(
resource.value_type
),
unit=Primitive.from_proto(resource.unit),
description=Primitive.from_proto(resource.description),
display_name=Primitive.from_proto(resource.display_name),
metadata=LogMetricMetricDescriptorMetadata.from_proto(resource.metadata),
launch_stage=LogMetricMetricDescriptorLaunchStageEnum.from_proto(
resource.launch_stage
),
monitored_resource_types=Primitive.from_proto(
resource.monitored_resource_types
),
)
class LogMetricMetricDescriptorArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricMetricDescriptor.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [LogMetricMetricDescriptor.from_proto(i) for i in resources]
class LogMetricMetricDescriptorLabels(object):
def __init__(
self, key: str = None, value_type: str = None, description: str = None
):
self.key = key
self.value_type = value_type
self.description = description
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricMetricDescriptorLabels()
if Primitive.to_proto(resource.key):
res.key = Primitive.to_proto(resource.key)
if LogMetricMetricDescriptorLabelsValueTypeEnum.to_proto(resource.value_type):
res.value_type = LogMetricMetricDescriptorLabelsValueTypeEnum.to_proto(
resource.value_type
)
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricMetricDescriptorLabels(
key=Primitive.from_proto(resource.key),
value_type=LogMetricMetricDescriptorLabelsValueTypeEnum.from_proto(
resource.value_type
),
description=Primitive.from_proto(resource.description),
)
class LogMetricMetricDescriptorLabelsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricMetricDescriptorLabels.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [LogMetricMetricDescriptorLabels.from_proto(i) for i in resources]
class LogMetricMetricDescriptorMetadata(object):
def __init__(self, sample_period: str = None, ingest_delay: str = None):
self.sample_period = sample_period
self.ingest_delay = ingest_delay
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricMetricDescriptorMetadata()
if Primitive.to_proto(resource.sample_period):
res.sample_period = Primitive.to_proto(resource.sample_period)
if Primitive.to_proto(resource.ingest_delay):
res.ingest_delay = Primitive.to_proto(resource.ingest_delay)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricMetricDescriptorMetadata(
sample_period=Primitive.from_proto(resource.sample_period),
ingest_delay=Primitive.from_proto(resource.ingest_delay),
)
class LogMetricMetricDescriptorMetadataArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricMetricDescriptorMetadata.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [LogMetricMetricDescriptorMetadata.from_proto(i) for i in resources]
class LogMetricBucketOptions(object):
def __init__(
self,
linear_buckets: dict = None,
exponential_buckets: dict = None,
explicit_buckets: dict = None,
):
self.linear_buckets = linear_buckets
self.exponential_buckets = exponential_buckets
self.explicit_buckets = explicit_buckets
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricBucketOptions()
if LogMetricBucketOptionsLinearBuckets.to_proto(resource.linear_buckets):
res.linear_buckets.CopyFrom(
LogMetricBucketOptionsLinearBuckets.to_proto(resource.linear_buckets)
)
else:
res.ClearField("linear_buckets")
if LogMetricBucketOptionsExponentialBuckets.to_proto(
resource.exponential_buckets
):
res.exponential_buckets.CopyFrom(
LogMetricBucketOptionsExponentialBuckets.to_proto(
resource.exponential_buckets
)
)
else:
res.ClearField("exponential_buckets")
if LogMetricBucketOptionsExplicitBuckets.to_proto(resource.explicit_buckets):
res.explicit_buckets.CopyFrom(
LogMetricBucketOptionsExplicitBuckets.to_proto(
resource.explicit_buckets
)
)
else:
res.ClearField("explicit_buckets")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricBucketOptions(
linear_buckets=LogMetricBucketOptionsLinearBuckets.from_proto(
resource.linear_buckets
),
exponential_buckets=LogMetricBucketOptionsExponentialBuckets.from_proto(
resource.exponential_buckets
),
explicit_buckets=LogMetricBucketOptionsExplicitBuckets.from_proto(
resource.explicit_buckets
),
)
class LogMetricBucketOptionsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricBucketOptions.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [LogMetricBucketOptions.from_proto(i) for i in resources]
class LogMetricBucketOptionsLinearBuckets(object):
def __init__(
self, num_finite_buckets: int = None, width: float = None, offset: float = None
):
self.num_finite_buckets = num_finite_buckets
self.width = width
self.offset = offset
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricBucketOptionsLinearBuckets()
if Primitive.to_proto(resource.num_finite_buckets):
res.num_finite_buckets = Primitive.to_proto(resource.num_finite_buckets)
if Primitive.to_proto(resource.width):
res.width = Primitive.to_proto(resource.width)
if Primitive.to_proto(resource.offset):
res.offset = Primitive.to_proto(resource.offset)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricBucketOptionsLinearBuckets(
num_finite_buckets=Primitive.from_proto(resource.num_finite_buckets),
width=Primitive.from_proto(resource.width),
offset=Primitive.from_proto(resource.offset),
)
class LogMetricBucketOptionsLinearBucketsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricBucketOptionsLinearBuckets.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [LogMetricBucketOptionsLinearBuckets.from_proto(i) for i in resources]
class LogMetricBucketOptionsExponentialBuckets(object):
def __init__(
self,
num_finite_buckets: int = None,
growth_factor: float = None,
scale: float = None,
):
self.num_finite_buckets = num_finite_buckets
self.growth_factor = growth_factor
self.scale = scale
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricBucketOptionsExponentialBuckets()
if Primitive.to_proto(resource.num_finite_buckets):
res.num_finite_buckets = Primitive.to_proto(resource.num_finite_buckets)
if Primitive.to_proto(resource.growth_factor):
res.growth_factor = Primitive.to_proto(resource.growth_factor)
if Primitive.to_proto(resource.scale):
res.scale = Primitive.to_proto(resource.scale)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricBucketOptionsExponentialBuckets(
num_finite_buckets=Primitive.from_proto(resource.num_finite_buckets),
growth_factor=Primitive.from_proto(resource.growth_factor),
scale=Primitive.from_proto(resource.scale),
)
class LogMetricBucketOptionsExponentialBucketsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricBucketOptionsExponentialBuckets.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
LogMetricBucketOptionsExponentialBuckets.from_proto(i) for i in resources
]
class LogMetricBucketOptionsExplicitBuckets(object):
def __init__(self, bounds: list = None):
self.bounds = bounds
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = log_metric_pb2.LoggingLogMetricBucketOptionsExplicitBuckets()
if float64Array.to_proto(resource.bounds):
res.bounds.extend(float64Array.to_proto(resource.bounds))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return LogMetricBucketOptionsExplicitBuckets(
bounds=float64Array.from_proto(resource.bounds),
)
class LogMetricBucketOptionsExplicitBucketsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [LogMetricBucketOptionsExplicitBuckets.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [LogMetricBucketOptionsExplicitBuckets.from_proto(i) for i in resources]
class LogMetricMetricDescriptorLabelsValueTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorLabelsValueTypeEnum.Value(
"LoggingLogMetricMetricDescriptorLabelsValueTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorLabelsValueTypeEnum.Name(
resource
)[len("LoggingLogMetricMetricDescriptorLabelsValueTypeEnum") :]
class LogMetricMetricDescriptorMetricKindEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorMetricKindEnum.Value(
"LoggingLogMetricMetricDescriptorMetricKindEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorMetricKindEnum.Name(
resource
)[len("LoggingLogMetricMetricDescriptorMetricKindEnum") :]
class LogMetricMetricDescriptorValueTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorValueTypeEnum.Value(
"LoggingLogMetricMetricDescriptorValueTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorValueTypeEnum.Name(
resource
)[len("LoggingLogMetricMetricDescriptorValueTypeEnum") :]
class LogMetricMetricDescriptorLaunchStageEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorLaunchStageEnum.Value(
"LoggingLogMetricMetricDescriptorLaunchStageEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return log_metric_pb2.LoggingLogMetricMetricDescriptorLaunchStageEnum.Name(
resource
)[len("LoggingLogMetricMetricDescriptorLaunchStageEnum") :]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
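# Minimal usage sketch (illustrative only): the metric name, filter, and project id below
# are placeholders, and channel credentials must already be configured for this generated
# client to reach the service.
if __name__ == "__main__":
    metric = LogMetric(
        name="error_count",
        description="Count of ERROR-severity log entries",
        filter="severity>=ERROR",
        project="my-project",
    )
    metric.apply()   # create or update the log-based metric
    for item in LogMetric.list(project="my-project"):
        print(item.name)
    metric.delete()  # clean up the example metric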
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.textfmts
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various text formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Literal
from pygments.util import ClassNotFound
__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer']
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
[T ])? # Date/time separator: T or space
(?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
(\S+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
(\S+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"\S+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
.. versionadded:: 0.9
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
# (r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([A-Za-z-]+:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
.. versionadded:: 1.5
"""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
tokens = {
'root': [
(r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)'
r'(HTTP)(/)(1\.[01])(\r?\n|$)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
class TodotxtLexer(RegexLexer):
"""
Lexer for `Todo.txt <http://todotxt.com/>`_ todo list format.
.. versionadded:: 2.0
"""
name = 'Todotxt'
aliases = ['todotxt']
# *.todotxt is not a standard extension for Todo.txt files; including it
# makes testing easier, and also makes autodetecting file type easier.
filenames = ['todo.txt', '*.todotxt']
mimetypes = ['text/x-todo']
# Aliases mapping standard token types of Todo.txt format concepts
CompleteTaskText = Operator # Chosen to de-emphasize complete tasks
IncompleteTaskText = Text # Incomplete tasks should look like plain text
# Priority should have most emphasis to indicate importance of tasks
Priority = Generic.Heading
# Dates should have next most emphasis because time is important
Date = Generic.Subheading
# Project and context should have equal weight, and be in different colors
Project = Generic.Error
Context = String
# If tag functionality is added, it should have the same weight as Project
# and Context, and a different color. Generic.Traceback would work well.
# Regex patterns for building up rules; dates, priorities, projects, and
# contexts are all atomic
# TODO: Make date regex more ISO 8601 compliant
date_regex = r'\d{4,}-\d{2}-\d{2}'
priority_regex = r'\([A-Z]\)'
project_regex = r'\+\S+'
context_regex = r'@\S+'
# Compound regex expressions
complete_one_date_regex = r'(x )(' + date_regex + r')'
complete_two_date_regex = (complete_one_date_regex + r'( )(' +
date_regex + r')')
priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')'
tokens = {
# Should parse starting at beginning of line; each line is a task
'root': [
# Complete task entry points: two total:
# 1. Complete task with two dates
(complete_two_date_regex, bygroups(CompleteTaskText, Date,
CompleteTaskText, Date),
'complete'),
# 2. Complete task with one date
(complete_one_date_regex, bygroups(CompleteTaskText, Date),
'complete'),
# Incomplete task entry points: six total:
# 1. Priority plus date
(priority_date_regex, bygroups(Priority, IncompleteTaskText, Date),
'incomplete'),
# 2. Priority only
(priority_regex, Priority, 'incomplete'),
# 3. Leading date
(date_regex, Date, 'incomplete'),
# 4. Leading context
(context_regex, Context, 'incomplete'),
# 5. Leading project
(project_regex, Project, 'incomplete'),
# 6. Non-whitespace catch-all
            (r'\S+', IncompleteTaskText, 'incomplete'),
],
# Parse a complete task
'complete': [
# Newline indicates end of task, should return to root
(r'\s*\n', CompleteTaskText, '#pop'),
# Tokenize contexts and projects
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
            (r'\S+', CompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', CompleteTaskText),
],
# Parse an incomplete task
'incomplete': [
# Newline indicates end of task, should return to root
(r'\s*\n', IncompleteTaskText, '#pop'),
# Tokenize contexts and projects
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
            (r'\S+', IncompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', IncompleteTaskText),
],
}
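# Minimal usage sketch (illustrative, not part of the lexer module): highlight a small
# HTTP exchange with HttpLexer. TerminalFormatter is a standard Pygments formatter; the
# request text below is made up for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample = (
        "GET /api/users HTTP/1.1\r\n"
        "Host: example.com\r\n"
        "Content-Type: application/json\r\n"
        "\r\n"
        '{"active": true}\n'
    )
    print(highlight(sample, HttpLexer(), TerminalFormatter()))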
|
|
"""
Metadata for four-body terms listed in alphabetical order
Each functional_form has the following values:
- form: The overall mathematical expression of the term
- parameters: The ordered name of the terms as they will be stored with their expected unit contexts
- units: A dictionary that contains the unit context for each parameter
- description: A short word description of the four-body term style
This data will be validated during testing.
"""
_four_body_functional_forms = {
# "none": {},
# "zero": {},
# "class2": {},
# "cosine/shift/exp": {},
# "hybrid": {},
# "spherical": {},
# "table": {},
"charmmfsw": {
"form": "K*(1 + cos(n*phi-d))",
"parameters": ["K", "n", "d"],
"units": {
"K": "[energy]",
"n": "count",
"d": "[arcunit]",
},
"description": "This is a charmm dihedral"
},
"multi/harmonic": {
"form": "A_1 + A_2 * (cos(phi)) + A_3 * (cos(phi)) ** 2 + A_4 * (cos(phi)) ** 3 + A_5 * (cos(phi)) ** (4)",
"parameters": ["A_1","A_2","A_3", "A_4", "A_5"],
"units": {
"A_1": "[energy]",
"A_2": "[energy]",
"A_3": "[energy]",
"A_4": "[energy]",
"A_5": "[energy]",
},
"description": "This is a multi/harmonic dihedral"
},
"ryckaert_bellemans": {
"form": "A_0 + A_1 * (cos(phi)) + A_2 * (cos(phi)) ** 2 + A_3 * (cos(phi)) ** 3 + A_4 * (cos(phi)) ** (4) + A_5 * (cos(phi)) ** (5)",
"parameters": ["A_0", "A_1","A_2","A_3", "A_4", "A_5"],
"units": {
"A_0": "[energy]",
"A_1": "[energy]",
"A_2": "[energy]",
"A_3": "[energy]",
"A_4": "[energy]",
"A_5": "[energy]",
},
"description": "This is a ryckaert_bellemans"
},
# "fourier": {
# "form": "Sum(k_i * (1.0 + cos(n_i * phi - d_i)))",
# "parameters": ["k_i", "n_i", "d_i"],
# "units": {
# "k_i": "[energy]",
# "n_i": "phase",
# "d_i": "[arcunit]"
# },
# "description": "This is a fourier dihedral"
# },
"harmonic": {
"form": "K * (1 + d * cos(n * phi))",
"parameters": ["K", "n", "d"],
"units": {
"K": "[energy]",
"n": "phase", # + / 1
"d": "count"
},
"description": "A harmonic dihedral"
},
"helix": {
"form": "A*(1-cos(phi)) + B*(1+cos(3*phi)) + C*(1+cos(phi+PI/4))",
"parameters": ["A", "B", "C"],
"units": {
"A": "[energy]",
"B": "[energy]",
"C": "[energy]"
},
"description": "This is a helix dihedral"
},
# "nharmonic": {
# "form": "Sum( A_n*(cos(phi))**(n-1))",
# "parameters": ["A_n", "n"],
# "units": {
# "A_n": "[energy]",
# "n": "count"
# },
# "description": "This is a nharmonic dihedral"
# },
"opls": {
"form": "0.5*K_1*(1+cos(phi))+0.5*K_2*(1-cos(2*phi))+0.5*K_3*(1+cos(3*phi))+0.5*K_4*(1-cos(4*phi))",
"parameters": ["K_1", "K_2", "K_3", "K_4"],
"units": {
"K_1": "[energy]",
"K_2": "[energy]",
"K_3": "[energy]",
"K_4": "[energy]",
},
"description": "This is a opls dihedral"
},
"quadratic": {
"form": "K*(phi-phi0)**2",
"parameters": ["K", "phi0"],
"units": {
"K": "[energy]",
"phi0": "[arcunit]"
},
"description": "This is a quadratic dihedral"
},
"restricted": {
"form": "0.5*K*(cos(phi)-cos(phi0))**2/(sin(phi)**2)",
"parameters": ["K", "phi0"],
"units": {
"K": "[energy]",
"phi0": "[arcunit]",
},
"description": "This is a restricted bending angle found in Gromacs"
},
#######IMPROPERS START HERE
#"class2_improper": {},
#"hybrid_improper": {},
#"none_improper": {},
#"ring_improper": {},
#"umbrella_improper": {},
#"zero_improper": {},
"cossq_improper": {
"form": "0.5*K*(cos(chi-chi0))**2",
"parameters": ["K", "chi0"],
"units": {
"K": "[energy]",
"chi0": "[arcunit]"
},
"description": "This is a cossq improper"
},
"cvff_improper": {
"form": "K*(1+d*cos(n*chi))",
"parameters": ["K", "d", "n"],
"units": {
"K": "[energy]",
"d": "phase",
"n": "count"
},
"description": "This is a cvff improper"
},
"distance_improper": {
"form": "K_2*r**2+K_4*r**4",
"parameters": ["K_2", "K_4"],
"units": {
"K_2": "[energy] * [length]**2",
"K_4": "[energy] * [length]**4",
},
"description": "This is a distance improper"
},
"fourier_improper": {
"form": "K*(C0+C1*cos(omega)+C2*cos(2*omega))",
"parameters": ["K", "C0", "C1", "C2"],
"units": {
"K": "[energy]",
"C0": "dimensionless",
"C1": "dimensionless",
"C2": "dimensionless",
},
"description": "This is a fourier improper"
},
"harmonic_improper": {
"form": "K*(chi-chi0)**2",
"parameters": ["K", "chi0"],
"units": {
"K": "[energy]",
"chi0": "[arcunit]",
},
"description": "This is a harmonic improper"
},
}
### Do NOT edit below this line
four_body_metadata = {}
# Valid variables used in all four-body terms
four_body_metadata["variables"] = {
"phi": {
"units": "[arcunit]",
"description": "The dihedral between the indexed atom1, atom2, atom3, and atom4.",
},
"theta": {
"units": "[arcunit]",
"description": "Angle between three given atoms"
},
"r": {
"units": "[distance]",
"description": "Distance between two given atoms"
},
"chi": {
"units": "[arcunit]",
"description": "Improper angle"
},
"omega": {
"units":
"[arcunit]",
"description":
"Angle between the vector formed by a non-central atom and the plane formed by the other three atoms"
}
}
# Add store data
four_body_metadata["store_name"] = "4body"
four_body_metadata["store_indices"] = {
"atom1": "Index of the first atom.",
"atom2": "Index of the second atom.",
"atom3": "Index of the third atom.",
"atom4": "Index of the fourth atom.",
"term_index": "Index of four_body_type stored in the DataLayer.",
}
four_body_metadata["index_columns"] = ["atom1", "atom2", "atom3", "atom4"]
four_body_metadata["forms"] = _four_body_functional_forms
|
|
from typing import Any, Dict, List, Union
from unittest import mock
import orjson
from zerver.lib.actions import (
do_remove_realm_custom_profile_field,
do_update_user_custom_profile_data_if_changed,
try_add_realm_custom_profile_field,
try_reorder_realm_custom_profile_fields,
)
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.markdown import markdown_convert
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import queries_captured
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
custom_profile_fields_for_realm,
get_realm,
)
class CustomProfileFieldTestCase(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.realm = get_realm("zulip")
self.original_count = len(custom_profile_fields_for_realm(self.realm.id))
def custom_field_exists_in_realm(self, field_id: int) -> bool:
fields = custom_profile_fields_for_realm(self.realm.id)
field_ids = [field.id for field in fields]
return field_id in field_ids
class CreateCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_create(self) -> None:
self.login("iago")
realm = get_realm("zulip")
data: Dict[str, Any] = {"name": "Phone", "field_type": "text id"}
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'Argument "field_type" is not valid JSON.')
data["name"] = ""
data["field_type"] = 100
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Label cannot be blank.")
data["name"] = "*" * 41
data["field_type"] = 100
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
data["name"] = "Phone"
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid field type.")
data["name"] = "Phone"
data["hint"] = "*" * 81
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
msg = "hint is too long (limit: 80 characters)"
self.assert_json_error(result, msg)
data["name"] = "Phone"
data["hint"] = "Contact number"
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
field = CustomProfileField.objects.get(name="Phone", realm=realm)
self.assertEqual(field.id, field.order)
data["name"] = "Name "
data["hint"] = "Some name"
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
field = CustomProfileField.objects.get(name="Name", realm=realm)
self.assertEqual(field.id, field.order)
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "A field with that label already exists.")
def test_create_select_field(self) -> None:
self.login("iago")
data: Dict[str, Union[str, int]] = {}
data["name"] = "Favorite programming language"
data["field_type"] = CustomProfileField.SELECT
data["field_data"] = "invalid"
result = self.client_post("/json/realm/profile_fields", info=data)
error_msg = "Bad value for 'field_data': invalid"
self.assert_json_error(result, error_msg)
data["field_data"] = orjson.dumps(
{
"python": ["1"],
"java": ["2"],
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "field_data is not a dict")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python"},
"java": {"text": "Java"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "order key is missing from field_data")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": ""},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["order"] cannot be blank.')
data["field_data"] = orjson.dumps(
{
"": {"text": "Python", "order": "1"},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "'value' cannot be blank.")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": 1},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["order"] is not a string')
data["field_data"] = orjson.dumps({}).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Field must have at least one choice.")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": "1"},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
def test_create_default_external_account_field(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field_type: int = CustomProfileField.EXTERNAL_ACCOUNT
field_data: str = orjson.dumps(
{
"subtype": "twitter",
}
).decode()
invalid_field_name: str = "Not required field name"
invalid_field_hint: str = "Not required field hint"
result = self.client_post(
"/json/realm/profile_fields",
info=dict(
field_type=field_type,
field_data=field_data,
hint=invalid_field_hint,
name=invalid_field_name,
),
)
self.assert_json_success(result)
# Silently overwrite name and hint with values set in default fields dict
# for default custom external account fields.
with self.assertRaises(CustomProfileField.DoesNotExist):
field = CustomProfileField.objects.get(name=invalid_field_name, realm=realm)
# The field is created with 'Twitter' name as per values in default fields dict
field = CustomProfileField.objects.get(name="Twitter")
self.assertEqual(field.name, DEFAULT_EXTERNAL_ACCOUNTS["twitter"]["name"])
self.assertEqual(field.hint, DEFAULT_EXTERNAL_ACCOUNTS["twitter"]["hint"])
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
# Should also work without name or hint and only external field type and subtype data
result = self.client_post(
"/json/realm/profile_fields", info=dict(field_type=field_type, field_data=field_data)
)
self.assert_json_success(result)
# Default external account field data cannot be updated
field = CustomProfileField.objects.get(name="Twitter", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Twitter username", "field_type": CustomProfileField.EXTERNAL_ACCOUNT},
)
self.assert_json_error(result, "Default custom field cannot be updated.")
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
def test_create_external_account_field(self) -> None:
self.login("iago")
realm = get_realm("zulip")
data: Dict[str, Union[str, int, Dict[str, str]]] = {}
data["name"] = "Twitter"
data["field_type"] = CustomProfileField.EXTERNAL_ACCOUNT
data["field_data"] = "invalid"
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Bad value for 'field_data': invalid")
data["field_data"] = orjson.dumps({}).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "subtype key is missing from field_data")
data["field_data"] = orjson.dumps(
{
"subtype": "",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["subtype"] cannot be blank.')
data["field_data"] = orjson.dumps(
{
"subtype": "123",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid external account type")
non_default_external_account = "linkedin"
data["field_data"] = orjson.dumps(
{
"subtype": non_default_external_account,
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid external account type")
data["field_data"] = orjson.dumps(
{
"subtype": "twitter",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
twitter_field = CustomProfileField.objects.get(name="Twitter", realm=realm)
self.assertEqual(twitter_field.field_type, CustomProfileField.EXTERNAL_ACCOUNT)
self.assertEqual(twitter_field.name, "Twitter")
self.assertEqual(orjson.loads(twitter_field.field_data)["subtype"], "twitter")
data["name"] = "Reddit"
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Custom external account must define URL pattern")
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": 123,
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["url_pattern"] is not a string')
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "invalid",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Malformed URL pattern.")
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "https://www.reddit.com/%(username)s/user/%(username)s",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Malformed URL pattern.")
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "reddit.com/%(username)s",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["url_pattern"] is not a URL')
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "https://www.reddit.com/user/%(username)s",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
custom_field = CustomProfileField.objects.get(name="Reddit", realm=realm)
self.assertEqual(custom_field.field_type, CustomProfileField.EXTERNAL_ACCOUNT)
self.assertEqual(custom_field.name, "Reddit")
field_data = orjson.loads(custom_field.field_data)
self.assertEqual(field_data["subtype"], "custom")
self.assertEqual(field_data["url_pattern"], "https://www.reddit.com/user/%(username)s")
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "A field with that label already exists.")
def test_create_field_of_type_user(self) -> None:
self.login("iago")
data = {
"name": "Your mentor",
"field_type": CustomProfileField.USER,
}
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
def test_not_realm_admin(self) -> None:
self.login("hamlet")
result = self.client_post("/json/realm/profile_fields")
self.assert_json_error(result, "Must be an organization administrator")
result = self.client_delete("/json/realm/profile_fields/1")
self.assert_json_error(result, "Must be an organization administrator")
class DeleteCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_delete(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field = CustomProfileField.objects.get(name="Phone number", realm=realm)
result = self.client_delete("/json/realm/profile_fields/100")
self.assert_json_error(result, "Field id 100 not found.")
self.assertTrue(self.custom_field_exists_in_realm(field.id))
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
self.assertFalse(self.custom_field_exists_in_realm(field.id))
def test_delete_field_value(self) -> None:
iago = self.example_user("iago")
self.login_user(iago)
realm = get_realm("zulip")
invalid_field_id = 1234
result = self.client_delete(
"/json/users/me/profile_data",
{
"data": orjson.dumps([invalid_field_id]).decode(),
},
)
self.assert_json_error(result, f"Field id {invalid_field_id} not found.")
field = CustomProfileField.objects.get(name="Mentor", realm=realm)
data: List[Dict[str, Union[int, str, List[int]]]] = [
{"id": field.id, "value": [self.example_user("aaron").id]},
]
do_update_user_custom_profile_data_if_changed(iago, data)
iago_value = CustomProfileFieldValue.objects.get(user_profile=iago, field=field)
converter = field.FIELD_CONVERTERS[field.field_type]
self.assertEqual([self.example_user("aaron").id], converter(iago_value.value))
result = self.client_delete(
"/json/users/me/profile_data",
{
"data": orjson.dumps([field.id]).decode(),
},
)
self.assert_json_success(result)
# Don't throw an exception here
result = self.client_delete(
"/json/users/me/profile_data",
{
"data": orjson.dumps([field.id]).decode(),
},
)
self.assert_json_success(result)
def test_delete_internals(self) -> None:
user_profile = self.example_user("iago")
realm = user_profile.realm
field = CustomProfileField.objects.get(name="Phone number", realm=realm)
data: List[Dict[str, Union[int, str, List[int]]]] = [
{"id": field.id, "value": "123456"},
]
do_update_user_custom_profile_data_if_changed(user_profile, data)
self.assertTrue(self.custom_field_exists_in_realm(field.id))
self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count)
do_remove_realm_custom_profile_field(realm, field)
self.assertFalse(self.custom_field_exists_in_realm(field.id))
self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count - 1)
class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_update(self) -> None:
self.login("iago")
realm = get_realm("zulip")
result = self.client_patch(
"/json/realm/profile_fields/100",
info={"name": "Phone number", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_error(result, "Field id 100 not found.")
field = CustomProfileField.objects.get(name="Phone number", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_error(result, "Label cannot be blank.")
self.assertEqual(CustomProfileField.objects.count(), self.original_count)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "New phone number", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_success(result)
field = CustomProfileField.objects.get(id=field.id, realm=realm)
self.assertEqual(CustomProfileField.objects.count(), self.original_count)
self.assertEqual(field.name, "New phone number")
        self.assertEqual(field.hint, "")
self.assertEqual(field.field_type, CustomProfileField.SHORT_TEXT)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "*" * 41, "field_type": CustomProfileField.SHORT_TEXT},
)
msg = "name is too long (limit: 40 characters)"
self.assert_json_error(result, msg)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={
"name": "New phone number",
"hint": "*" * 81,
"field_type": CustomProfileField.SHORT_TEXT,
},
)
msg = "hint is too long (limit: 80 characters)"
self.assert_json_error(result, msg)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={
"name": "New phone number",
"hint": "New contact number",
"field_type": CustomProfileField.SHORT_TEXT,
},
)
self.assert_json_success(result)
field = CustomProfileField.objects.get(id=field.id, realm=realm)
self.assertEqual(CustomProfileField.objects.count(), self.original_count)
self.assertEqual(field.name, "New phone number")
self.assertEqual(field.hint, "New contact number")
self.assertEqual(field.field_type, CustomProfileField.SHORT_TEXT)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Name ", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_success(result)
field.refresh_from_db()
self.assertEqual(field.name, "Name")
field = CustomProfileField.objects.get(name="Favorite editor", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Favorite editor", "field_data": "invalid"},
)
self.assert_json_error(result, "Bad value for 'field_data': invalid")
field_data = orjson.dumps(
{
"vim": "Vim",
"emacs": {"order": "2", "text": "Emacs"},
}
).decode()
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Favorite editor", "field_data": field_data},
)
self.assert_json_error(result, "field_data is not a dict")
field_data = orjson.dumps(
{
"vim": {"order": "1", "text": "Vim"},
"emacs": {"order": "2", "text": "Emacs"},
"notepad": {"order": "3", "text": "Notepad"},
}
).decode()
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Favorite editor", "field_data": field_data},
)
self.assert_json_success(result)
def test_update_is_aware_of_uniqueness(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field_1 = try_add_realm_custom_profile_field(realm, "Phone", CustomProfileField.SHORT_TEXT)
field_2 = try_add_realm_custom_profile_field(
realm, "Phone 1", CustomProfileField.SHORT_TEXT
)
self.assertTrue(self.custom_field_exists_in_realm(field_1.id))
self.assertTrue(self.custom_field_exists_in_realm(field_2.id))
result = self.client_patch(
f"/json/realm/profile_fields/{field_2.id}",
info={"name": "Phone", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_error(result, "A field with that label already exists.")
def assert_error_update_invalid_value(
self, field_name: str, new_value: object, error_msg: str
) -> None:
self.login("iago")
realm = get_realm("zulip")
field = CustomProfileField.objects.get(name=field_name, realm=realm)
# Update value of field
result = self.client_patch(
"/json/users/me/profile_data",
{"data": orjson.dumps([{"id": field.id, "value": new_value}]).decode()},
)
self.assert_json_error(result, error_msg)
def test_update_invalid_field(self) -> None:
self.login("iago")
data = [{"id": 1234, "value": "12"}]
result = self.client_patch(
"/json/users/me/profile_data",
{
"data": orjson.dumps(data).decode(),
},
)
self.assert_json_error(result, "Field id 1234 not found.")
def test_update_invalid_short_text(self) -> None:
field_name = "Phone number"
self.assert_error_update_invalid_value(
field_name, "t" * 201, f"{field_name} is too long (limit: 50 characters)"
)
def test_update_invalid_date(self) -> None:
field_name = "Birthday"
self.assert_error_update_invalid_value(field_name, "a-b-c", f"{field_name} is not a date")
self.assert_error_update_invalid_value(
field_name, "1909-3-5", f"{field_name} is not a date"
)
self.assert_error_update_invalid_value(field_name, [123], f"{field_name} is not a string")
def test_update_invalid_url(self) -> None:
field_name = "Favorite website"
self.assert_error_update_invalid_value(field_name, "not URL", f"{field_name} is not a URL")
def test_update_invalid_user_field(self) -> None:
field_name = "Mentor"
invalid_user_id = 1000
self.assert_error_update_invalid_value(
field_name, [invalid_user_id], f"Invalid user ID: {invalid_user_id}"
)
def test_update_profile_data_successfully(self) -> None:
self.login("iago")
realm = get_realm("zulip")
fields = [
("Phone number", "*short* text data"),
("Biography", "~~short~~ **long** text data"),
("Favorite food", "long short text data"),
("Favorite editor", "vim"),
("Birthday", "1909-03-05"),
("Favorite website", "https://zulip.com"),
("Mentor", [self.example_user("cordelia").id]),
("GitHub", "zulip-mobile"),
]
data = []
for i, field_value in enumerate(fields):
name, value = field_value
field = CustomProfileField.objects.get(name=name, realm=realm)
data.append(
{
"id": field.id,
"value": value,
"field": field,
}
)
# Update value of field
result = self.client_patch(
"/json/users/me/profile_data",
{"data": orjson.dumps([{"id": f["id"], "value": f["value"]} for f in data]).decode()},
)
self.assert_json_success(result)
iago = self.example_user("iago")
expected_value = {f["id"]: f["value"] for f in data}
expected_rendered_value: Dict[Union[int, float, str, None], Union[str, None]] = {}
for f in data:
if f["field"].is_renderable():
expected_rendered_value[f["id"]] = markdown_convert(f["value"]).rendered_content
else:
expected_rendered_value[f["id"]] = None
for field_dict in iago.profile_data():
self.assertEqual(field_dict["value"], expected_value[field_dict["id"]])
self.assertEqual(
field_dict["rendered_value"], expected_rendered_value[field_dict["id"]]
)
for k in ["id", "type", "name", "field_data"]:
self.assertIn(k, field_dict)
# Update value of one field.
field = CustomProfileField.objects.get(name="Biography", realm=realm)
data = [
{
"id": field.id,
"value": "foobar",
}
]
result = self.client_patch(
"/json/users/me/profile_data", {"data": orjson.dumps(data).decode()}
)
self.assert_json_success(result)
for field_dict in iago.profile_data():
if field_dict["id"] == field.id:
self.assertEqual(field_dict["value"], "foobar")
def test_update_invalid_select_field(self) -> None:
field_name = "Favorite editor"
self.assert_error_update_invalid_value(
field_name, "foobar", f"'foobar' is not a valid choice for '{field_name}'."
)
def test_update_select_field_successfully(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field = CustomProfileField.objects.get(name="Favorite editor", realm=realm)
data = [
{
"id": field.id,
"value": "emacs",
}
]
result = self.client_patch(
"/json/users/me/profile_data", {"data": orjson.dumps(data).decode()}
)
self.assert_json_success(result)
def test_null_value_and_rendered_value(self) -> None:
self.login("iago")
realm = get_realm("zulip")
quote = try_add_realm_custom_profile_field(
realm=realm,
name="Quote",
hint="Saying or phrase which you known for.",
field_type=CustomProfileField.SHORT_TEXT,
)
iago = self.example_user("iago")
iago_profile_quote = iago.profile_data()[-1]
value = iago_profile_quote["value"]
rendered_value = iago_profile_quote["rendered_value"]
self.assertIsNone(value)
self.assertIsNone(rendered_value)
update_dict: Dict[str, Union[int, str, List[int]]] = {
"id": quote.id,
"value": "***beware*** of jealousy...",
}
do_update_user_custom_profile_data_if_changed(iago, [update_dict])
iago_profile_quote = self.example_user("iago").profile_data()[-1]
value = iago_profile_quote["value"]
rendered_value = iago_profile_quote["rendered_value"]
self.assertIsNotNone(value)
self.assertIsNotNone(rendered_value)
self.assertEqual("<p><strong><em>beware</em></strong> of jealousy...</p>", rendered_value)
def test_do_update_value_not_changed(self) -> None:
iago = self.example_user("iago")
self.login_user(iago)
realm = get_realm("zulip")
# Set field value:
field = CustomProfileField.objects.get(name="Mentor", realm=realm)
data: List[Dict[str, Union[int, str, List[int]]]] = [
{"id": field.id, "value": [self.example_user("aaron").id]},
]
do_update_user_custom_profile_data_if_changed(iago, data)
with mock.patch("zerver.lib.actions.notify_user_update_custom_profile_data") as mock_notify:
# Attempting to "update" the field value, when it wouldn't actually change,
# shouldn't trigger notify.
do_update_user_custom_profile_data_if_changed(iago, data)
mock_notify.assert_not_called()
class ListCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_list(self) -> None:
self.login("iago")
result = self.client_get("/json/realm/profile_fields")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
content = result.json()
self.assert_length(content["custom_fields"], self.original_count)
def test_list_order(self) -> None:
self.login("iago")
realm = get_realm("zulip")
order = (
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
try_reorder_realm_custom_profile_fields(realm, order)
result = self.client_get("/json/realm/profile_fields")
content = result.json()
self.assertListEqual(
content["custom_fields"], sorted(content["custom_fields"], key=lambda x: -x["id"])
)
def test_get_custom_profile_fields_from_api(self) -> None:
iago = self.example_user("iago")
test_bot = self.create_test_bot("foo-bot", iago)
self.login_user(iago)
assert test_bot
with queries_captured() as queries:
response = self.client_get(
"/json/users", {"client_gravatar": "false", "include_custom_profile_fields": "true"}
)
self.assert_length(queries, 4)
self.assertEqual(response.status_code, 200)
raw_users_data = response.json()["members"]
iago_raw_data = None
test_bot_raw_data = None
for user_dict in raw_users_data:
if user_dict["user_id"] == iago.id:
iago_raw_data = user_dict
continue
if user_dict["user_id"] == test_bot.id:
test_bot_raw_data = user_dict
continue
if (not iago_raw_data) or (not test_bot_raw_data):
raise AssertionError("Could not find required data from the response.")
expected_keys_for_iago = {
"delivery_email",
"email",
"user_id",
"avatar_url",
"avatar_version",
"is_admin",
"is_guest",
"is_billing_admin",
"is_bot",
"is_owner",
"role",
"full_name",
"timezone",
"is_active",
"date_joined",
"profile_data",
}
self.assertEqual(set(iago_raw_data.keys()), expected_keys_for_iago)
self.assertNotEqual(iago_raw_data["profile_data"], {})
expected_keys_for_test_bot = {
"delivery_email",
"email",
"user_id",
"avatar_url",
"avatar_version",
"is_admin",
"is_guest",
"is_bot",
"is_owner",
"is_billing_admin",
"role",
"full_name",
"timezone",
"is_active",
"date_joined",
"bot_type",
"bot_owner_id",
}
self.assertEqual(set(test_bot_raw_data.keys()), expected_keys_for_test_bot)
self.assertEqual(test_bot_raw_data["bot_type"], 1)
self.assertEqual(test_bot_raw_data["bot_owner_id"], iago_raw_data["user_id"])
response = self.client_get("/json/users", {"client_gravatar": "false"})
self.assertEqual(response.status_code, 200)
raw_users_data = response.json()["members"]
for user_dict in raw_users_data:
with self.assertRaises(KeyError):
user_dict["profile_data"]
def test_get_custom_profile_fields_from_api_for_single_user(self) -> None:
self.login("iago")
expected_keys = {
"result",
"msg",
"max_message_id",
"user_id",
"avatar_url",
"full_name",
"email",
"is_bot",
"is_admin",
"is_owner",
"is_billing_admin",
"role",
"profile_data",
"avatar_version",
"timezone",
"delivery_email",
"is_active",
"is_guest",
"date_joined",
}
url = "/json/users/me"
response = self.client_get(url)
self.assertEqual(response.status_code, 200)
raw_user_data = response.json()
self.assertEqual(set(raw_user_data.keys()), expected_keys)
class ReorderCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_reorder(self) -> None:
self.login("iago")
realm = get_realm("zulip")
order = list(
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_success(result)
fields = CustomProfileField.objects.filter(realm=realm).order_by("order")
for field in fields:
self.assertEqual(field.id, order[field.order])
def test_reorder_duplicates(self) -> None:
self.login("iago")
realm = get_realm("zulip")
order = list(
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
order.append(4)
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_success(result)
fields = CustomProfileField.objects.filter(realm=realm).order_by("order")
for field in fields:
self.assertEqual(field.id, order[field.order])
def test_reorder_unauthorized(self) -> None:
self.login("hamlet")
realm = get_realm("zulip")
order = list(
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_error(result, "Must be an organization administrator")
def test_reorder_invalid(self) -> None:
self.login("iago")
order = [100, 200, 300]
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_error(result, "Invalid order mapping.")
order = [1, 2]
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_error(result, "Invalid order mapping.")
|
|
import appengine_config
import json
import logging
import uuid
import GlobalUtilities as tools
import spreadsheet_pipelines
from google.appengine.api import taskqueue, memcache
from google.appengine.ext import deferred
# Cloud Endpoints
import endpoints
from protorpc import remote
from endpoints_messages import *
## Cloud Endpoints Cookies - monkey patch
from endpoints import api_config
class PatchedApiConfigGenerator(api_config.ApiConfigGenerator):
def pretty_print_config_to_json(self, services, hostname=None):
json_string = super(PatchedApiConfigGenerator, self).pretty_print_config_to_json(
services, hostname=hostname)
to_patch = json.loads(json_string)
to_patch['auth'] = {'allowCookieAuth': True}
return json.dumps(to_patch, sort_keys=True, indent=2)
api_config.ApiConfigGenerator = PatchedApiConfigGenerator
## End monkey patch
endpoints_client_id = "AIzaSyB7k0LsUXibTJHkCx_D3MA0HT6tQAtYZAo"
endpoints_description = "GHI Donations API"
endpoints_clients = [endpoints_client_id, endpoints.API_EXPLORER_CLIENT_ID]
@endpoints.api(name='ghidonations', version='v1',
description=endpoints_description,
allowed_client_ids=endpoints_clients)
class EndpointsAPI(remote.Service):
#### ---- Globalhopeindia.org Utility Functions ---- ####
# public.all_teams
@endpoints.method(AllTeams_In, AllTeams_Out, path='public/all_teams',
http_method='GET', name='public.all_teams')
def public_all_teams(self, req):
s = tools.getKey(req.settings_key).get()
all_teams = []
for t in s.data.display_teams:
team = Team_Data(name=t.name, key=t.key.urlsafe())
all_teams.append(team)
return AllTeams_Out(objects=all_teams)
# public.individual_info
@endpoints.method(IndividualInfo_In, IndividualInfo_Out, path='public/individual_info',
http_method='GET', name='public.individual_info')
def public_individual_info(self, req):
i = tools.getKey(req.individual_key).get()
t_key = tools.getKey(req.team_key)
info = i.data.info(t_key)
return IndividualInfo_Out(image_url=info[0], name=info[1], description=info[2],
percentage=info[3], message=info[4])
# public.team_info
@endpoints.method(TeamInfo_In, TeamInfo_Out, path='public/team_info',
http_method='GET', name='public.team_info')
def public_team_info(self, req):
t = tools.getKey(req.team_key).get()
info_list = []
m_list = t.data.members_list
for m in m_list:
info = TeamInfo_Data(name=m[0], photo_url=m[1], tl_key=m[2])
info_list.append(info)
return TeamInfo_Out(info_list=info_list)
#### ---- Data Access ---- ####
# get.contacts
@endpoints.method(Query_In, Contacts_Out, path='get/contacts',
http_method='GET', name='get.contacts')
def get_contacts(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
        if req.query is None:
req.query = ""
results = s.search.contact(req.query, query_cursor=req.query_cursor)
logging.info("Getting contacts with query: " + req.query)
contacts = []
new_cursor = tools.getWebsafeCursor(results[1])
for c in results[0]:
f = c.fields
contact = Contact_Data(key=f[0].value, name=f[1].value, email=tools.truncateEmail(f[2].value))
contacts.append(contact)
return Contacts_Out(objects=contacts, new_cursor=new_cursor)
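    # Paging: each search call returns a cursor alongside its results; it is
    # exposed to the caller as new_cursor, and passing that value back as
    # query_cursor on a subsequent request continues from where the previous
    # page left off. The same pattern is used by the other get.* methods below.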
# get.contact_donations
@endpoints.method(GetContactDonations_In, Donations_Out, path='get/contact_donations',
http_method='GET', name='get.contact_donations')
def get_contact_donations(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
query = "contact_key:" + str(req.contact_key)
results = s.search.donation(query, query_cursor=req.query_cursor)
logging.info("Getting contact donations with query: " + query)
donations = []
new_cursor = tools.getWebsafeCursor(results[1])
for d in results[0]:
f = d.fields
donation = Donation_Data(key=f[0].value, formatted_donation_date=f[9].value, name=f[2].value,
email=tools.truncateEmail(f[3].value),
payment_type=f[5].value, amount_donated=tools.moneyAmount(f[4].value))
donations.append(donation)
return Donations_Out(objects=donations, new_cursor=new_cursor)
# get.deposits
@endpoints.method(Query_In, Deposits_Out, path='get/deposits',
http_method='GET', name='get.deposits')
def get_deposits(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
        if req.query is None:
req.query = ""
results = s.search.deposit(req.query, query_cursor=req.query_cursor)
logging.info("Getting deposits with query: " + req.query)
deposits = []
new_cursor = tools.getWebsafeCursor(results[1])
for de in results[0]:
f = de.fields
deposit = Deposit_Data(key=f[0].value, time_deposited=f[1].value)
deposits.append(deposit)
return Deposits_Out(objects=deposits, new_cursor=new_cursor)
# get.donations
@endpoints.method(Query_In, Donations_Out, path='get/donations',
http_method='GET', name='get.donations')
def get_donations(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
query = req.query
        if query is None:
query = ""
results = s.search.donation(query, query_cursor=req.query_cursor)
logging.info("Getting donations with query: " + query)
donations = []
new_cursor = tools.getWebsafeCursor(results[1])
for d in results[0]:
f = d.fields
team_name = f[6].value
if not team_name:
team_name = " "
individual_name = f[7].value
if not individual_name:
individual_name = " "
donation = Donation_Data(key=f[0].value, formatted_donation_date=f[9].value, name=f[2].value,
email=tools.truncateEmail(f[3].value),
payment_type=f[5].value, amount_donated=tools.moneyAmount(f[4].value),
team_name=team_name, individual_name=individual_name)
donations.append(donation)
return Donations_Out(objects=donations, new_cursor=new_cursor)
# get.individuals
@endpoints.method(Query_In, Individuals_Out, path='get/individuals',
http_method='GET', name='get.individuals')
def get_individuals(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
        if req.query is None:
req.query = ""
results = s.search.individual(req.query, query_cursor=req.query_cursor)
logging.info("Getting individuals with query: " + req.query)
individuals = []
new_cursor = tools.getWebsafeCursor(results[1])
for i in results[0]:
f = i.fields
individual = Individual_Data(key=f[0].value, name=f[1].value, email=f[2].value,
raised=tools.moneyAmount(f[4].value))
individuals.append(individual)
return Individuals_Out(objects=individuals, new_cursor=new_cursor)
# get.monthly_chart_data
@endpoints.method(NoRequestParams, JSON_Out, path='get/monthly_chart_data',
http_method='GET', name='get.monthly_chart_data')
def monthly_chart_data(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
json_data = s.one_month_history
return JSON_Out(json_data=json_data)
# mailchimp.lists
@endpoints.method(MailchimpLists_In, MailchimpLists_Out, path='mailchimp/lists',
http_method='GET', name='mailchimp.lists')
def mailchimp_lists(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
response = tools.getMailchimpLists(self, req.mc_apikey)
mc_lists = None
error_message = None
        if response[0]:
mc_lists = json.dumps(response[1])
else:
mc_lists = None
error_message = response[1]
return MailchimpLists_Out(success=response[0], mc_lists=mc_lists, error_message=error_message)
# get.teams
@endpoints.method(Query_In, Teams_Out, path='get/teams',
http_method='GET', name='get.teams')
def get_teams(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
        if req.query is None:
req.query = ""
results = s.search.team(req.query, query_cursor=req.query_cursor)
logging.info("Getting teams with query: " + req.query)
teams = []
new_cursor = tools.getWebsafeCursor(results[1])
for t in results[0]:
f = t.fields
team = Team_Data(key=f[0].value, name=f[1].value, donation_total=tools.moneyAmount(f[2].value))
teams.append(team)
return Teams_Out(objects=teams, new_cursor=new_cursor)
# get.team_donation_total
@endpoints.method(TeamKey_In, GetTeamDonationTotal_Out, path='get/team_donation_total',
http_method='GET', name='get.team_donation_total')
def get_team_donation_total(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
t = tools.getKey(req.team_key).get()
donation_total = tools.moneyAmount(t.data.donation_total)
return GetTeamDonationTotal_Out(donation_total=donation_total)
# get.team_members
@endpoints.method(GetTeamMembers_In, Individuals_Out, path='get/team_members',
http_method='GET', name='get.team_members')
def get_team_members(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
query = "team_key:" + str(req.team_key)
results = s.search.individual(query, query_cursor=req.query_cursor)
logging.info("Getting team members with query: " + query)
individuals = []
new_cursor = tools.getWebsafeCursor(results[1])
for i in results[0]:
f = i.fields
individual = Individual_Data(key=f[0].value, name=f[1].value, email=f[2].value,
raised=tools.moneyAmount(f[4].value))
individuals.append(individual)
return Individuals_Out(objects=individuals, new_cursor=new_cursor)
# get.team_totals
@endpoints.method(IndividualKey_In, GetTeamTotals_Out, path='get/team_totals',
http_method='GET', name='get.team_totals')
def get_team_totals(self, req):
s = tools.getSettingsKey(self, endpoints=True).get()
i = tools.getKey(req.individual_key).get()
team_totals = []
for tl in i.teamlist_entities:
total = GetTeamTotals_Data(team_name=tl.team_name, donation_total=tools.moneyAmount(tl.data.donation_total))
team_totals.append(total)
return GetTeamTotals_Out(team_totals=team_totals)
    # semi.get.individual_donations
@endpoints.method(GetIndividualDonations_In, Donations_Out, path='semi/get/individual_donations',
http_method='GET', name='semi.get.individual_donations')
def semi_get_individual_donations(self, req):
isAdmin, s = tools.checkAuthentication(self, False, from_endpoints=True)
query = "individual_key:" + str(req.individual_key)
results = s.search.donation(query, query_cursor=req.query_cursor)
logging.info("Getting individual donations with query: " + query)
donations = []
new_cursor = tools.getWebsafeCursor(results[1])
for d in results[0]:
f = d.fields
team_name = f[6].value
            if team_name is None:
team_name = ""
donation = Donation_Data(key=f[0].value, formatted_donation_date=f[9].value, name=f[2].value,
email=tools.truncateEmail(f[3].value),
payment_type=f[5].value, amount_donated=tools.moneyAmount(f[4].value),
team_name=team_name)
donations.append(donation)
return Donations_Out(objects=donations, new_cursor=new_cursor)
# semi.get.team_members
@endpoints.method(SemiGetTeamMembers_In, SemiGetTeamMembers_Out, path='semi/get/team_members',
http_method='GET', name='semi.get.team_members')
def semi_get_team_members(self, req):
# Returns team information
isAdmin, s = tools.checkAuthentication(self, False, from_endpoints=True)
t = tools.getKey(req.team_key).get()
members_list = t.data.members_list
members = []
for m in members_list:
member = SemiGetTeamMembers_Data(key=m[2], name=m[0])
members.append(member)
return SemiGetTeamMembers_Out(objects=members)
#### ---- Data creation/updating ---- ####
# donation.mark_unreviewed
@endpoints.method(DonationKey_In, SuccessMessage_Out, path='donation/mark_unreviewed',
http_method='POST', name='donation.mark_unreviewed')
def donation_mark_unreviewed(self, req):
message = "Donation marked as unreviewed"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
d = tools.getKey(req.donation_key).get()
d.review.markUnreviewed()
return SuccessMessage_Out(success=success, message=message)
# new.contact
@endpoints.method(NewContact_In, SuccessMessage_Out, path='new/contact',
http_method='POST', name='new.contact')
def new_contact(self, req):
message = "<b>" + req.name + "</b> created"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
contact_exists = s.exists.contact(email=req.email)
address = [req.address.street, req.address.city, req.address.state, req.address.zipcode]
        if not contact_exists[0]:
s.create.contact(req.name, email=req.email, phone=req.phone, address=address, notes=req.notes, add_mc=True)
else:
# If this email address already exists for a user
message = "Whoops! You entered an email address already in use by another contact."
success = False
return SuccessMessage_Out(success=success, message=message)
# new.impression
@endpoints.method(NewImpression_In, SuccessMessage_Out, path='new/impression',
http_method='POST', name='new.impression')
def new_impression(self, req):
message = "Impression saved"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
c = tools.getKey(req.contact_key).get()
c.create.impression(req.impression, req.notes)
return SuccessMessage_Out(success=success, message=message)
# new.individual
@endpoints.method(NewIndividual_In, SuccessMessage_Out, path='new/individual',
http_method='POST', name='new.individual')
def new_individual(self, req):
message = "<b>" + req.name + "</b> created"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
exists = s.exists.individual(req.email)
email, team_key = req.email, req.team_key
if email == "": email = None
if team_key == "team": team_key = None
        if email is None or not exists[0]:
s.create.individual(req.name, tools.getKey(team_key), email, req.password, admin=req.admin)
else:
# If this email address already exists for a user
message = "Sorry, but this email address is already being used."
success = False
return SuccessMessage_Out(success=success, message=message)
# new.team
@endpoints.method(NewTeam_In, SuccessMessage_Out, path='new/team',
http_method='POST', name='new.team')
def new_team(self, req):
message = "<b>" + req.name + "</b> created"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
s.create.team(req.name)
return SuccessMessage_Out(success=success, message=message)
# new.offline_donation
@endpoints.method(NewOfflineDonation_In, SuccessMessage_Out, path='new/offline_donation',
http_method='POST', name='new.offline_donation')
def new_offline_donation(self, req):
message = "Offline donation created"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
# Make req variables local
name, email, amount_donated, notes, address, team_key, individual_key, \
add_deposit = req.name, req.email, tools.toDecimal(req.amount_donated), req.notes, \
req.address, req.team_key, req.individual_key, req.add_deposit
# Check for null value in individual field
if individual_key == "none":
individual_key = None
if address:
address = [address.street, address.city, address.state, address.zipcode]
if team_key:
team_key = tools.getKey(team_key)
if individual_key:
individual_key = tools.getKey(individual_key)
s.create.donation(name, email, amount_donated, "offline", address=address, team_key=team_key,
individual_key=individual_key, add_deposit=add_deposit, special_notes=notes)
return SuccessMessage_Out(success=success, message=message)
# update.donation
@endpoints.method(UpdateDonation_In, SuccessMessage_Out, path='update/donation',
http_method='POST', name='update.donation')
def update_donation(self, req):
message = "Donation has been saved"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
d = tools.getKey(req.donation_key).get()
# Make req variables local
team_key, individual_key = req.team_key, req.individual_key
if team_key:
if team_key == "general":
team_key = None
else:
team_key = tools.getKey(team_key)
if individual_key:
individual_key = tools.getKey(individual_key)
d.update(req.notes, team_key, individual_key, None, req.donation_date)
return SuccessMessage_Out(success=success, message=message)
# update.contact
@endpoints.method(UpdateContact_In, SuccessMessage_Out, path='update/contact',
http_method='POST', name='update.contact')
def update_contact(self, req):
message = "Contact has been saved"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
c = tools.getKey(req.contact_key).get()
a = req.address
address = [a.street, a.city, a.state, a.zipcode]
# Check to see if a new email was added and see if it already exists
list_diff = tools.listDiff(c.email, req.email)
if list_diff:
email_exists = s.exists.contact(email=list_diff)[0]
else:
email_exists = False
        if email_exists:
success = False
message = "Whoops! You entered an email address already in use by another contact."
else:
c.update(req.name, req.email, req.phone, req.notes, address)
return SuccessMessage_Out(success=success, message=message)
# update.settings
@endpoints.method(UpdateSettings_In, SuccessMessage_Out, path='update/settings',
http_method='POST', name='update.settings')
def update_settings(self, req):
message = "Settings have been updated"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
s.update(req.name, req.email, req.mc_use, req.mc_apikey, req.mc_donorlist,
req.paypal_id, req.impressions, req.donate_parent, req.amount1, req.amount2, req.amount3,
req.amount4, req.use_custom, req.confirmation_header, req.confirmation_info,
req.confirmation_footer, req.confirmation_text, req.donor_report_text)
return SuccessMessage_Out(success=success, message=message)
# update.team
@endpoints.method(UpdateTeam_In, SuccessMessage_Out, path='update/team',
http_method='POST', name='update.team')
def update_team(self, req):
message = "Team has been updated"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
t = tools.getKey(req.team_key).get()
t.update(req.name, req.show_team)
return SuccessMessage_Out(success=success, message=message)
#### ---- Contact merge ---- ####
# merge.contacts
@endpoints.method(MergeContacts_In, SuccessMessage_Out, path='merge/contacts',
http_method='POST', name='merge.contacts')
def merge_contacts(self, req):
message = "Contacts merged"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
c1 = tools.getKey(req.contact1)
c2 = tools.getKey(req.contact2)
tools.mergeContacts(c1, c2)
return SuccessMessage_Out(success=success, message=message)
#### ---- Search ---- ####
# get.contacts_json
@endpoints.method(NoRequestParams, JSON_Out, path='get/contacts_json',
http_method='GET', name='get.contacts_json')
def get_contacts_json(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
return JSON_Out(json_data=s.data.contactsJSON)
#### ---- Donation depositing ---- ####
# deposits.add
@endpoints.method(Deposits_In, SuccessMessage_Out, path='deposits/add',
http_method='POST', name='deposits.add')
def deposits_add(self, req):
message = "Donations deposited."
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
if req.donation_keys != []:
donation_keys = []
for k in req.donation_keys:
donation_keys.append(str(k))
deferred.defer(s.deposits.deposit, donation_keys, _queue="backend")
else:
message = "No donations to deposit."
success = False
return SuccessMessage_Out(success=success, message=message)
# deposits.remove
@endpoints.method(Deposits_In, SuccessMessage_Out, path='deposits/remove',
http_method='POST', name='deposits.remove')
def deposits_remove(self, req):
message = "Donations removed from deposits."
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
if req.donation_keys != []:
donation_keys = []
for k in req.donation_keys:
donation_keys.append(str(k))
deferred.defer(s.deposits.remove, donation_keys, _queue="backend")
return SuccessMessage_Out(success=success, message=message)
#### ---- Confirmation Letters ---- ####
# confirmation.email
@endpoints.method(DonationKey_In, SuccessMessage_Out, path='confirmation/email',
http_method='POST', name='confirmation.email')
def confirmation_email(self, req):
message = "Email sent"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
d = tools.getKey(req.donation_key).get()
if not d.email or d.email == ['']:
success = False
message = "Email not sent. Contact doesn't have an email address."
else:
# Email receipt to donor
d.review.archive()
d.confirmation.task(60)
return SuccessMessage_Out(success=success, message=message)
# confirmation.print
@endpoints.method(DonationKey_In, ConfirmationPrint_Out, path='confirmation/print',
http_method='POST', name='confirmation.print')
def printReceipt(self, req):
message = "Receipt open for printing"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
d = tools.getKey(req.donation_key).get()
# Print receipt to donor
d.review.archive()
print_url = d.confirmation.print_url(None)
return ConfirmationPrint_Out(success=success, message=message, print_url=print_url)
# confirmation.annual_report
@endpoints.method(ConfirmationAnnualReport_In, SuccessMessage_Out, path='confirmation/annual_report',
http_method='POST', name='confirmation.annual_report')
def confirmation_annual_report(self, req):
message = "Annual report sent"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
taskqueue.add(queue_name="annualreport", url="/tasks/annualreport",
params={'contact_key': req.contact_key, 'year': req.year})
return SuccessMessage_Out(success=success, message=message)
# donation.archive
@endpoints.method(DonationKey_In, SuccessMessage_Out, path='donation/archive',
http_method='POST', name='donation.archive')
def donation_archive(self, req):
message = "Donation archived"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
d = tools.getKey(req.donation_key).get()
d.review.archive()
return SuccessMessage_Out(success=success, message=message)
# donation.delete
@endpoints.method(DonationKey_In, SuccessMessage_Out, path='donation/delete',
http_method='POST', name='donation.delete')
def donation_delete(self, req):
message = "Donation deleted"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
tools.getKey(req.donation_key).delete()
return SuccessMessage_Out(success=success, message=message)
# contact.delete
@endpoints.method(ContactKey_In, SuccessMessage_Out, path='contact/delete',
http_method='POST', name='contact.delete')
def contact_delete(self, req):
message = "Contact deleted"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
tools.getKey(req.contact_key).delete()
return SuccessMessage_Out(success=success, message=message)
# contact.info
@endpoints.method(ContactKey_In, Contact_Info, path='contact/info',
http_method='POST', name='contact.info')
def contact_info(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
c = tools.getKey(req.contact_key).get()
return Contact_Info(key=c.websafe, name=c.name, email=c.email,
address=c.address)
# team.delete
@endpoints.method(TeamKey_In, SuccessMessage_Out, path='team/delete',
http_method='POST', name='team.delete')
def deleteTeam(self, req):
message = "Team deleted"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
tools.getKey(req.team_key).delete()
return SuccessMessage_Out(success=success, message=message)
# individual.delete
@endpoints.method(IndividualKey_In, SuccessMessage_Out, path='individual/delete',
http_method='POST', name='individual.delete')
def individual_delete(self, req):
message = "Individual deleted"
success = True
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
user_key = tools.getUserKey(self)
isAdmin = user_key.get().admin
i_key = tools.getKey(req.individual_key)
        if isAdmin:
i_key.delete()
else:
            if user_key == i_key:
i_key.delete()
else:
# Access denied - non-admin trying to delete someone else
message = "Failed - Access denied"
success = False
return SuccessMessage_Out(success=success, message=message)
#### ---- Spreadsheet Export Controllers ---- ####
# spreadsheet.start
@endpoints.method(SpreadsheetStart_In, SpreadsheetStart_Out, path='spreadsheet/start',
http_method='POST', name='spreadsheet.start')
def spreadsheet_start(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
if req.mode not in ["contacts", "donations", "individuals"]:
raise endpoints.NotFoundException("Incorrect spreadsheet mode")
# Create unique identifier for this job
job_id = str(uuid.uuid4())
memcache.set(job_id, 0)
stage = spreadsheet_pipelines.GenerateReport(s.websafe, req.mode, job_id)
stage.start(queue_name='spreadsheet')
pipeline_id = stage.pipeline_id
memcache.set("id" + job_id, pipeline_id)
return SpreadsheetStart_Out(job_id=job_id)
# spreadsheet.check
@endpoints.method(SpreadsheetCheck_In, SpreadsheetCheck_Out, path='spreadsheet/check',
http_method='POST', name='spreadsheet.check')
def spreadsheet_check(self, req):
isAdmin, s = tools.checkAuthentication(self, True, from_endpoints=True)
completed, gcs_file_key = tools.checkTaskCompletion(s, req.job_id)
status = tools.pipelineStatus(req.job_id)
if completed:
download_url = "http://commondatastorage.googleapis.com/" + gcs_file_key[1:]
else:
download_url = None
return SpreadsheetCheck_Out(completed=completed, download_url=download_url, status=status)
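    # Typical client flow (illustrative): call spreadsheet.start to obtain a
    # job_id, then poll spreadsheet.check with that job_id until completed is
    # True, at which point download_url points at the generated file.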
app = endpoints.api_server([EndpointsAPI], restricted=False)
app = appengine_config.webapp_add_wsgi_middleware(app)
|
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/ttfonts.py
"""TrueType font support
This defines classes to represent TrueType fonts. They know how to calculate
their own width and how to write themselves into PDF files. They support
subsetting and embedding and can represent all 16-bit Unicode characters.
Note on dynamic fonts
---------------------
Usually a Font in ReportLab corresponds to a fixed set of PDF objects (Font,
FontDescriptor, Encoding). But with dynamic font subsetting a single TTFont
will result in a number of Font/FontDescriptor/Encoding object sets, and the
contents of those will depend on the actual characters used for printing.
To support dynamic font subsetting, a concept of "dynamic font" was introduced.
Dynamic Fonts have a _dynamicFont attribute set to 1. Since other Font objects
may lack this attribute, you should use constructs like
if getattr(font, '_dynamicFont', 0):
# dynamic font
else:
# traditional static font
Dynamic fonts have the following additional functions:
def splitString(self, text, doc):
'''Splits text into a number of chunks, each of which belongs to a
single subset. Returns a list of tuples (subset, string). Use
subset numbers with getSubsetInternalName. Doc is used to identify
a document so that different documents may have different dynamically
constructed subsets.'''
def getSubsetInternalName(self, subset, doc):
'''Returns the name of a PDF Font object corresponding to a given
subset of this dynamic font. Use this function instead of
PDFDocument.getInternalFontName.'''
You must never call PDFDocument.getInternalFontName for dynamic fonts.
If you have a traditional static font, mapping to PDF text output operators
is simple:
'%s 14 Tf (%s) Tj' % (getInternalFontName(psfontname), text)
If you have a dynamic font, use this instead:
for subset, chunk in font.splitString(text, doc):
'%s 14 Tf (%s) Tj' % (font.getSubsetInternalName(subset, doc), chunk)
(Tf is a font setting operator and Tj is a text output operator. You should
also escape invalid characters in Tj argument, see TextObject._formatText.
Oh, and that 14 up there is font size.)
Canvas and TextObject have special support for dynamic fonts.
"""
__version__ = '$Id: ttfonts.py,v 1.1 2006/05/26 19:19:47 thomas Exp $'
import string
from types import StringType
from struct import pack, unpack
from cStringIO import StringIO
from reportlab.pdfbase import pdfmetrics, pdfdoc
def _L2U32(L):
return unpack('l',pack('L',L))[0]
class TTFError(pdfdoc.PDFError):
"TrueType font exception"
pass
#
# Helpers
#
from codecs import utf_8_encode, utf_8_decode, latin_1_decode
parse_utf8=lambda x, decode=utf_8_decode: map(ord,decode(x)[0])
parse_latin1 = lambda x, decode=latin_1_decode: map(ord,decode(x)[0])
def latin1_to_utf8(text):
"helper to convert when needed from latin input"
return utf_8_encode(latin_1_decode(text)[0])[0]
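# Illustrative examples of the helpers above (byte string in, code points or
# byte string out):
#   parse_latin1('AB') == [65, 66]
#   parse_utf8('\xc3\xa9') == [0xE9]
#   latin1_to_utf8('\xe9') == '\xc3\xa9'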
def makeToUnicodeCMap(fontname, subset):
"""Creates a ToUnicode CMap for a given subset. See Adobe
_PDF_Reference (ISBN 0-201-75839-3) for more information."""
cmap = [
"/CIDInit /ProcSet findresource begin",
"12 dict begin",
"begincmap",
"/CIDSystemInfo",
"<< /Registry (%s)" % fontname,
"/Ordering (%s)" % fontname,
"/Supplement 0",
">> def",
"/CMapName /%s def" % fontname,
"/CMapType 2 def",
"1 begincodespacerange",
"<00> <%02X>" % (len(subset) - 1),
"endcodespacerange",
"%d beginbfchar" % len(subset)
] + map(lambda n, subset=subset: "<%02X> <%04X>" % (n, subset[n]),
range(len(subset))) + [
"endbfchar",
"endcmap",
"CMapName currentdict /CMap defineresource pop",
"end",
"end"
]
return string.join(cmap, "\n")
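# Illustrative usage (hypothetical font name, and a two-character subset that
# maps subset codes 0 and 1 to U+0020 and U+0041):
#   cmap_stream = makeToUnicodeCMap('MyFont+AB', [0x20, 0x41])
# The returned text can be used as the body of a /ToUnicode CMap stream.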
def splice(stream, offset, value):
"""Splices the given value into stream at the given offset and
returns the resulting stream (the original is unchanged)"""
return stream[:offset] + value + stream[offset + len(value):]
def _set_ushort(stream, offset, value):
"""Writes the given unsigned short value into stream at the given
offset and returns the resulting stream (the original is unchanged)"""
return splice(stream, offset, pack(">H", value))
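# Both helpers build a new string and leave the original untouched, e.g.
# (illustrative): _set_ushort('\x00\x00\x00\x00', 2, 0x0102) == '\x00\x00\x01\x02'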
import sys
try:
import _rl_accel
except ImportError:
try:
from reportlab.lib import _rl_accel
except ImportError:
_rl_accel = None
try:
hex32 = _rl_accel.hex32
except:
def hex32(i):
return '0X%8.8X' % (long(i)&0xFFFFFFFFL)
try:
add32 = _rl_accel.add32
except:
if sys.hexversion>=0x02030000:
def add32(x, y):
"Calculate (x + y) modulo 2**32"
return _L2U32((long(x)+y) & 0xffffffffL)
else:
def add32(x, y):
"Calculate (x + y) modulo 2**32"
lo = (x & 0xFFFF) + (y & 0xFFFF)
hi = (x >> 16) + (y >> 16) + (lo >> 16)
return (hi << 16) | (lo & 0xFFFF)
try:
calcChecksum = _rl_accel.calcChecksum
except:
def calcChecksum(data):
"""Calculates PDF-style checksums"""
if len(data)&3: data = data + (4-(len(data)&3))*"\0"
sum = 0
for n in unpack(">%dl" % (len(data)>>2), data):
sum = add32(sum,n)
return sum
del _rl_accel, sys
#
# TrueType font handling
#
GF_ARG_1_AND_2_ARE_WORDS = 1 << 0
GF_ARGS_ARE_XY_VALUES = 1 << 1
GF_ROUND_XY_TO_GRID = 1 << 2
GF_WE_HAVE_A_SCALE = 1 << 3
GF_RESERVED = 1 << 4
GF_MORE_COMPONENTS = 1 << 5
GF_WE_HAVE_AN_X_AND_Y_SCALE = 1 << 6
GF_WE_HAVE_A_TWO_BY_TWO = 1 << 7
GF_WE_HAVE_INSTRUCTIONS = 1 << 8
GF_USE_MY_METRICS = 1 << 9
GF_OVERLAP_COMPOUND = 1 << 10
GF_SCALED_COMPONENT_OFFSET = 1 << 11
GF_UNSCALED_COMPONENT_OFFSET = 1 << 12
def TTFOpenFile(fn):
    '''Opens a TTF file, possibly after searching TTFSearchPath, and
    returns (filename, file).
'''
from reportlab.lib.utils import rl_isfile, open_for_read
try:
f = open_for_read(fn,'rb')
return fn, f
except IOError:
import os
if not os.path.isabs(fn):
from reportlab import rl_config
for D in rl_config.TTFSearchPath:
tfn = os.path.join(D,fn)
if rl_isfile(tfn):
f = open_for_read(tfn,'rb')
return tfn, f
raise TTFError('Can\'t open file "%s"' % fn)
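# Illustrative usage (assumes a font such as 'Vera.ttf' can be found either as
# given or on rl_config.TTFSearchPath):
#   filename, f = TTFOpenFile('Vera.ttf')
#   data = f.read()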
class TTFontParser:
"Basic TTF file parser"
def __init__(self, file, validate=0):
"""Loads and parses a TrueType font file. file can be a filename or a
        file object. If validate is set to a false value, checksum validation
        is skipped. This can save time, especially if the font is large.
"""
# Open the file
if type(file) is StringType:
self.filename, file = TTFOpenFile(file)
else:
self.filename = '(ttf)'
self._ttf_data = file.read()
self._pos = 0
# Read header
try:
version = self.read_ulong()
if version == 0x4F54544F:
raise TTFError, 'OpenType fonts with PostScript outlines are not supported'
if version != 0x00010000 and version != 0x74727565:
raise TTFError, 'Not a TrueType font'
except:
raise TTFError, 'Not a TrueType font'
try:
self.numTables = self.read_ushort()
self.searchRange = self.read_ushort()
self.entrySelector = self.read_ushort()
self.rangeShift = self.read_ushort()
# Read table directory
self.table = {}
self.tables = []
for n in range(self.numTables):
record = {}
record['tag'] = self.read_tag()
record['checksum'] = self.read_ulong()
record['offset'] = self.read_ulong()
record['length'] = self.read_ulong()
self.tables.append(record)
self.table[record['tag']] = record
except:
raise TTFError, 'Corrupt TrueType font file'
if not validate:
return
# Check the checksums for the whole file
checkSum = calcChecksum(self._ttf_data)
if add32(_L2U32(0xB1B0AFBAL), -checkSum) != 0:
raise TTFError, 'Invalid checksum %s len: %d &3: %d' % (hex32(checkSum),len(self._ttf_data),(len(self._ttf_data)&3))
# Check the checksums for all tables
for t in self.tables:
table = self.get_chunk(t['offset'], t['length'])
checkSum = calcChecksum(table)
if t['tag'] == 'head':
adjustment = unpack('>l', table[8:8+4])[0]
checkSum = add32(checkSum, -adjustment)
if t['checksum'] != checkSum:
raise TTFError, 'Invalid checksum %s table: %s' % (hex32(checkSum),t['tag'])
def get_table_pos(self, tag):
"Returns the offset and size of a given TTF table."
offset = self.table[tag]['offset']
length = self.table[tag]['length']
return (offset, length)
def seek(self, pos):
"Moves read pointer to a given offset in file."
self._pos = pos
def skip(self, delta):
"Skip the given number of bytes."
self._pos = self._pos + delta
def seek_table(self, tag, offset_in_table = 0):
"""Moves read pointer to the given offset within a given table and
returns absolute offset of that position in the file."""
self._pos = self.get_table_pos(tag)[0] + offset_in_table
return self._pos
def read_tag(self):
"Read a 4-character tag"
self._pos = self._pos + 4
return self._ttf_data[self._pos - 4:self._pos]
def read_ushort(self):
"Reads an unsigned short"
self._pos = self._pos + 2
return (ord(self._ttf_data[self._pos - 2]) << 8) + \
(ord(self._ttf_data[self._pos - 1]))
def read_ulong(self):
"Reads an unsigned long"
self._pos = self._pos + 4
return unpack('>l',self._ttf_data[self._pos - 4:self._pos])[0]
def read_short(self):
"Reads a signed short"
us = self.read_ushort()
if us >= 0x8000:
return us - 0x10000
else:
return us
def get_ushort(self, pos):
"Return an unsigned short at given position"
return (ord(self._ttf_data[pos]) << 8) + \
(ord(self._ttf_data[pos + 1]))
def get_ulong(self, pos):
"Return an unsigned long at given position"
return unpack('>l',self._ttf_data[pos:pos+4])[0]
def get_chunk(self, pos, length):
"Return a chunk of raw data at given position"
return self._ttf_data[pos:pos+length]
def get_table(self, tag):
"Return the given TTF table"
pos, length = self.get_table_pos(tag)
return self._ttf_data[pos:pos+length]
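# Minimal usage sketch for the parser (illustrative; 'Vera.ttf' stands in for
# any available TrueType file):
#   parser = TTFontParser('Vera.ttf', validate=1)
#   tags = [entry['tag'] for entry in parser.tables]
#   head = parser.get_table('head')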
class TTFontMaker:
"Basic TTF file generator"
def __init__(self):
"Initializes the generator."
self.tables = {}
def add(self, tag, data):
"Adds a table to the TTF file."
if tag == 'head':
data = splice(data, 8, '\0\0\0\0')
self.tables[tag] = data
def makeStream(self):
"Finishes the generation and returns the TTF file as a string"
stm = StringIO()
numTables = len(self.tables)
searchRange = 1
entrySelector = 0
while searchRange * 2 <= numTables:
searchRange = searchRange * 2
entrySelector = entrySelector + 1
searchRange = searchRange * 16
rangeShift = numTables * 16 - searchRange
# Header
stm.write(pack(">lHHHH", 0x00010000, numTables, searchRange,
entrySelector, rangeShift))
# Table directory
tables = self.tables.items()
tables.sort() # XXX is this the correct order?
offset = 12 + numTables * 16
for tag, data in tables:
if tag == 'head':
head_start = offset
checksum = calcChecksum(data)
stm.write(tag)
stm.write(pack(">LLL", checksum, offset, len(data)))
paddedLength = (len(data)+3)&~3
offset = offset + paddedLength
# Table data
for tag, data in tables:
data = data + "\0\0\0"
stm.write(data[:len(data)&~3])
checksum = calcChecksum(stm.getvalue())
checksum = add32(_L2U32(0xB1B0AFBAL), -checksum)
stm.seek(head_start + 8)
stm.write(pack('>L', checksum))
return stm.getvalue()
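# Illustrative round trip using the two classes above (assumes an existing
# TTFontParser instance `parser`):
#   maker = TTFontMaker()
#   for entry in parser.tables:
#       maker.add(entry['tag'], parser.get_table(entry['tag']))
#   rebuilt = maker.makeStream()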
class TTFontFile(TTFontParser):
"TTF file parser and generator"
def __init__(self, file, charInfo=1, validate=0):
"""Loads and parses a TrueType font file.
        file can be a filename or a file object. If validate is set to a false
        value, checksum validation is skipped. This can save time, especially if
the font is large. See TTFontFile.extractInfo for more information.
"""
TTFontParser.__init__(self, file, validate=validate)
self.extractInfo(charInfo)
def extractInfo(self, charInfo=1):
"""Extract typographic information from the loaded font file.
The following attributes will be set:
name - PostScript font name
flags - Font flags
ascent - Typographic ascender in 1/1000ths of a point
descent - Typographic descender in 1/1000ths of a point
capHeight - Cap height in 1/1000ths of a point (0 if not available)
bbox - Glyph bounding box [l,t,r,b] in 1/1000ths of a point
italicAngle - Italic angle in degrees ccw
stemV - stem weight in 1/1000ths of a point (approximate)
If charInfo is true, the following will also be set:
defaultWidth - default glyph width in 1/1000ths of a point
charWidths - dictionary of character widths for every supported
UCS character code
This will only work if the font has a Unicode cmap (platform 3,
encoding 1, format 4 or platform 0 any encoding format 4). Setting
charInfo to false avoids this requirement.
"""
# name - Naming table
name_offset = self.seek_table("name")
format = self.read_ushort()
if format != 0:
raise TTFError, "Unknown name table format (%d)" % format
numRecords = self.read_ushort()
string_data_offset = name_offset + self.read_ushort()
names = {1:None,2:None,3:None,4:None,6:None}
K = names.keys()
nameCount = len(names)
for i in range(numRecords):
platformId = self.read_ushort()
encodingId = self.read_ushort()
languageId = self.read_ushort()
nameId = self.read_ushort()
length = self.read_ushort()
offset = self.read_ushort()
if nameId not in K: continue
N = None
if platformId == 3 and encodingId == 1 and languageId == 0x409: # Microsoft, Unicode, US English, PS Name
self.seek(string_data_offset + offset)
if length % 2 != 0:
raise TTFError, "PostScript name is UTF-16BE string of odd length"
length /= 2
N = []
A = N.append
while length > 0:
char = self.read_ushort()
A(chr(char))
length -= 1
N = ''.join(N)
elif platformId == 1 and encodingId == 0 and languageId == 0: # Macintosh, Roman, English, PS Name
# According to OpenType spec, if PS name exists, it must exist
# both in MS Unicode and Macintosh Roman formats. Apparently,
# you can find live TTF fonts which only have Macintosh format.
N = self.get_chunk(string_data_offset + offset, length)
            if N and names[nameId] is None:
names[nameId] = N
nameCount -= 1
if nameCount==0: break
psName = names[6]
if not psName:
raise TTFError, "Could not find PostScript font name"
for c in psName:
oc = ord(c)
if oc<33 or oc>126 or c in ('[', ']', '(', ')', '{', '}', '<', '>', '/', '%'):
raise TTFError, "psName contains invalid character '%s' ie U+%04X" % (c,ord(c))
self.name = psName
self.familyName = names[1] or psName
self.styleName = names[2] or 'Regular'
self.fullName = names[4] or psName
self.uniqueFontID = names[3] or psName
# head - Font header table
self.seek_table("head")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj != 1:
raise TTFError, 'Unknown head table version %d.%04x' % (ver_maj, ver_min)
self.skip(8)
magic = self.read_ulong()
if magic != 0x5F0F3CF5:
raise TTFError, 'Invalid head table magic %04x' % magic
self.skip(2)
unitsPerEm = self.read_ushort()
scale = lambda x, unitsPerEm=unitsPerEm: x * 1000 / unitsPerEm
self.skip(16)
xMin = self.read_short()
yMin = self.read_short()
xMax = self.read_short()
yMax = self.read_short()
self.bbox = map(scale, [xMin, yMin, xMax, yMax])
self.skip(3*2)
indexToLocFormat = self.read_ushort()
glyphDataFormat = self.read_ushort()
# OS/2 - OS/2 and Windows metrics table
# (needs data from head table)
if self.table.has_key("OS/2"):
self.seek_table("OS/2")
version = self.read_ushort()
self.skip(2)
usWeightClass = self.read_ushort()
self.skip(2)
fsType = self.read_ushort()
if fsType == 0x0002 or (fsType & 0x0300) != 0:
raise TTFError, 'Font does not allow subsetting/embedding (%04X)' % fsType
self.skip(11*2 + 10 + 4*4 + 4 + 3*2)
sTypoAscender = self.read_short()
sTypoDescender = self.read_short()
self.ascent = scale(sTypoAscender) # XXX: for some reason it needs to be multiplied by 1.24--1.28
self.descent = scale(sTypoDescender)
if version > 1:
self.skip(3*2 + 2*4 + 2)
sCapHeight = self.read_short()
self.capHeight = scale(sCapHeight)
else:
self.capHeight = self.ascent
else:
# Microsoft TTFs require an OS/2 table; Apple ones do not. Try to
# cope. The data is not very important anyway.
usWeightClass = 500
self.ascent = scale(yMax)
self.descent = scale(yMin)
self.capHeight = self.ascent
# There's no way to get stemV from a TTF file short of analyzing actual outline data
# This fuzzy formula is taken from pdflib sources, but we could just use 0 here
self.stemV = 50 + int((usWeightClass / 65.0) ** 2)
# post - PostScript table
# (needs data from OS/2 table)
self.seek_table("post")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj not in (1, 2, 3, 4):
# Adobe/MS documents 1, 2, 2.5, 3; Apple also has 4.
# From Apple docs it seems that we do not need to care
# about the exact version, so if you get this error, you can
# try to remove this check altogether.
raise TTFError, 'Unknown post table version %d.%04x' % (ver_maj, ver_min)
self.italicAngle = self.read_short() + self.read_ushort() / 65536.0
self.skip(2*2)
isFixedPitch = self.read_ulong()
self.flags = FF_SYMBOLIC # All fonts that contain characters
# outside the original Adobe character
# set are considered "symbolic".
if self.italicAngle != 0:
self.flags = self.flags | FF_ITALIC
if usWeightClass >= 600: # FW_REGULAR == 500, FW_SEMIBOLD == 600
self.flags = self.flags | FF_FORCEBOLD
if isFixedPitch:
self.flags = self.flags | FF_FIXED
# XXX: FF_SERIF? FF_SCRIPT? FF_ALLCAP? FF_SMALLCAP?
# hhea - Horizontal header table
self.seek_table("hhea")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj != 1:
raise TTFError, 'Unknown hhea table version %d.%04x' % (ver_maj, ver_min)
self.skip(28)
metricDataFormat = self.read_ushort()
if metricDataFormat != 0:
raise TTFError, 'Unknown horizontal metric data format (%d)' % metricDataFormat
numberOfHMetrics = self.read_ushort()
if numberOfHMetrics == 0:
raise TTFError, 'Number of horizontal metrics is 0'
# maxp - Maximum profile table
self.seek_table("maxp")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj != 1:
raise TTFError, 'Unknown maxp table version %d.%04x' % (ver_maj, ver_min)
numGlyphs = self.read_ushort()
if not charInfo:
self.charToGlyph = None
self.defaultWidth = None
self.charWidths = None
return
if glyphDataFormat != 0:
raise TTFError, 'Unknown glyph data format (%d)' % glyphDataFormat
# cmap - Character to glyph index mapping table
cmap_offset = self.seek_table("cmap")
self.skip(2)
cmapTableCount = self.read_ushort()
unicode_cmap_offset = None
for n in range(cmapTableCount):
platformID = self.read_ushort()
encodingID = self.read_ushort()
offset = self.read_ulong()
if platformID == 3 and encodingID == 1: # Microsoft, Unicode
format = self.get_ushort(cmap_offset + offset)
if format == 4:
unicode_cmap_offset = cmap_offset + offset
break
elif platformID == 0: # Unicode -- assume all encodings are compatible
format = self.get_ushort(cmap_offset + offset)
if format == 4:
unicode_cmap_offset = cmap_offset + offset
break
if unicode_cmap_offset is None:
raise TTFError, 'Font does not have cmap for Unicode (platform 3, encoding 1, format 4 or platform 0 any encoding format 4)'
self.seek(unicode_cmap_offset + 2)
length = self.read_ushort()
limit = unicode_cmap_offset + length
self.skip(2)
segCount = self.read_ushort() / 2
self.skip(6)
endCount = map(lambda x, self=self: self.read_ushort(), range(segCount))
self.skip(2)
startCount = map(lambda x, self=self: self.read_ushort(), range(segCount))
idDelta = map(lambda x, self=self: self.read_short(), range(segCount))
idRangeOffset_start = self._pos
idRangeOffset = map(lambda x, self=self: self.read_ushort(), range(segCount))
# Now it gets tricky.
glyphToChar = {}
charToGlyph = {}
for n in range(segCount):
for unichar in range(startCount[n], endCount[n] + 1):
if idRangeOffset[n] == 0:
glyph = (unichar + idDelta[n]) & 0xFFFF
else:
offset = (unichar - startCount[n]) * 2 + idRangeOffset[n]
offset = idRangeOffset_start + 2 * n + offset
if offset >= limit:
# workaround for broken fonts (like Thryomanes)
glyph = 0
else:
glyph = self.get_ushort(offset)
if glyph != 0:
glyph = (glyph + idDelta[n]) & 0xFFFF
charToGlyph[unichar] = glyph
if glyphToChar.has_key(glyph):
glyphToChar[glyph].append(unichar)
else:
glyphToChar[glyph] = [unichar]
self.charToGlyph = charToGlyph
# hmtx - Horizontal metrics table
# (needs data from hhea, maxp, and cmap tables)
self.seek_table("hmtx")
aw = None
self.charWidths = {}
self.hmetrics = []
for glyph in range(numberOfHMetrics):
# advance width and left side bearing. lsb is actually signed
# short, but we don't need it anyway (except for subsetting)
aw, lsb = self.read_ushort(), self.read_ushort()
self.hmetrics.append((aw, lsb))
aw = scale(aw)
if glyph == 0:
self.defaultWidth = aw
if glyphToChar.has_key(glyph):
for char in glyphToChar[glyph]:
self.charWidths[char] = aw
for glyph in range(numberOfHMetrics, numGlyphs):
            # the rest of the table only lists left side bearings, so we reuse
            # the aw set by the last iteration of the previous loop
lsb = self.read_ushort()
self.hmetrics.append((aw, lsb))
if glyphToChar.has_key(glyph):
for char in glyphToChar[glyph]:
self.charWidths[char] = aw
# loca - Index to location
self.seek_table('loca')
self.glyphPos = []
if indexToLocFormat == 0:
for n in range(numGlyphs + 1):
self.glyphPos.append(self.read_ushort() << 1)
elif indexToLocFormat == 1:
for n in range(numGlyphs + 1):
self.glyphPos.append(self.read_ulong())
else:
raise TTFError, 'Unknown location table format (%d)' % indexToLocFormat
# Subsetting
def makeSubset(self, subset):
"""Create a subset of a TrueType font"""
output = TTFontMaker()
# Build a mapping of glyphs in the subset to glyph numbers in
# the original font. Also build a mapping of UCS codes to
# glyph values in the new font.
# Start with 0 -> 0: "missing character"
glyphMap = [0] # new glyph index -> old glyph index
glyphSet = {0:0} # old glyph index -> new glyph index
codeToGlyph = {} # unicode -> new glyph index
for code in subset:
if self.charToGlyph.has_key(code):
originalGlyphIdx = self.charToGlyph[code]
else:
originalGlyphIdx = 0
if not glyphSet.has_key(originalGlyphIdx):
glyphSet[originalGlyphIdx] = len(glyphMap)
glyphMap.append(originalGlyphIdx)
codeToGlyph[code] = glyphSet[originalGlyphIdx]
# Also include glyphs that are parts of composite glyphs
start = self.get_table_pos('glyf')[0]
n = 0
while n < len(glyphMap):
originalGlyphIdx = glyphMap[n]
glyphPos = self.glyphPos[originalGlyphIdx]
glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
self.seek(start + glyphPos)
numberOfContours = self.read_short()
if numberOfContours < 0:
# composite glyph
self.skip(8)
flags = GF_MORE_COMPONENTS
while flags & GF_MORE_COMPONENTS:
flags = self.read_ushort()
glyphIdx = self.read_ushort()
if not glyphSet.has_key(glyphIdx):
glyphSet[glyphIdx] = len(glyphMap)
glyphMap.append(glyphIdx)
if flags & GF_ARG_1_AND_2_ARE_WORDS:
self.skip(4)
else:
self.skip(2)
if flags & GF_WE_HAVE_A_SCALE:
self.skip(2)
elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
self.skip(4)
elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
self.skip(8)
n = n + 1
numGlyphs = n = len(glyphMap)
while n > 1 and self.hmetrics[n][0] == self.hmetrics[n - 1][0]:
n = n - 1
numberOfHMetrics = n
# The following tables are simply copied from the original
for tag in ('name', 'OS/2', 'cvt ', 'fpgm', 'prep'):
try:
output.add(tag, self.get_table(tag))
except KeyError:
# Apparently some of the tables are optional (cvt, fpgm, prep).
# The lack of the required ones (name, OS/2) would have already
# been caught before.
pass
# post - PostScript
post = "\x00\x03\x00\x00" + self.get_table('post')[4:16] + "\x00" * 16
output.add('post', post)
# hhea - Horizontal Header
hhea = self.get_table('hhea')
hhea = _set_ushort(hhea, 34, numberOfHMetrics)
output.add('hhea', hhea)
# maxp - Maximum Profile
maxp = self.get_table('maxp')
maxp = _set_ushort(maxp, 4, numGlyphs)
output.add('maxp', maxp)
# cmap - Character to glyph mapping
# XXX maybe use format 0 if possible, not 6?
entryCount = len(subset)
length = 10 + entryCount * 2
cmap = [0, 1, # version, number of tables
1, 0, 0,12, # platform, encoding, offset (hi,lo)
6, length, 0, # format, length, language
0,
entryCount] + \
map(codeToGlyph.get, subset)
cmap = apply(pack, [">%dH" % len(cmap)] + cmap)
output.add('cmap', cmap)
# hmtx - Horizontal Metrics
hmtx = []
for n in range(numGlyphs):
originalGlyphIdx = glyphMap[n]
aw, lsb = self.hmetrics[originalGlyphIdx]
if n < numberOfHMetrics:
hmtx.append(aw)
hmtx.append(lsb)
hmtx = apply(pack, [">%dH" % len(hmtx)] + hmtx)
output.add('hmtx', hmtx)
# glyf - Glyph data
glyphData = self.get_table('glyf')
offsets = []
glyf = []
pos = 0
for n in range(numGlyphs):
offsets.append(pos)
originalGlyphIdx = glyphMap[n]
glyphPos = self.glyphPos[originalGlyphIdx]
glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
data = glyphData[glyphPos:glyphPos+glyphLen]
# Fix references in composite glyphs
if glyphLen > 2 and unpack(">h", data[:2])[0] < 0:
# composite glyph
pos_in_glyph = 10
flags = GF_MORE_COMPONENTS
while flags & GF_MORE_COMPONENTS:
flags = unpack(">H", data[pos_in_glyph:pos_in_glyph+2])[0]
glyphIdx = unpack(">H", data[pos_in_glyph+2:pos_in_glyph+4])[0]
data = _set_ushort(data, pos_in_glyph + 2, glyphSet[glyphIdx])
pos_in_glyph = pos_in_glyph + 4
if flags & GF_ARG_1_AND_2_ARE_WORDS:
pos_in_glyph = pos_in_glyph + 4
else:
pos_in_glyph = pos_in_glyph + 2
if flags & GF_WE_HAVE_A_SCALE:
pos_in_glyph = pos_in_glyph + 2
elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
pos_in_glyph = pos_in_glyph + 4
elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
pos_in_glyph = pos_in_glyph + 8
glyf.append(data)
pos = pos + glyphLen
if pos % 4 != 0:
padding = 4 - pos % 4
glyf.append('\0' * padding)
pos = pos + padding
offsets.append(pos)
output.add('glyf', string.join(glyf, ""))
# loca - Index to location
loca = []
if (pos + 1) >> 1 > 0xFFFF:
indexToLocFormat = 1 # long format
for offset in offsets:
loca.append(offset)
loca = apply(pack, [">%dL" % len(loca)] + loca)
else:
indexToLocFormat = 0 # short format
for offset in offsets:
loca.append(offset >> 1)
loca = apply(pack, [">%dH" % len(loca)] + loca)
output.add('loca', loca)
# head - Font header
head = self.get_table('head')
head = _set_ushort(head, 50, indexToLocFormat)
output.add('head', head)
return output.makeStream()
#
# TrueType font embedding
#
# PDF font flags (see PDF Reference Guide table 5.19)
FF_FIXED = 1 << 1-1
FF_SERIF = 1 << 2-1
FF_SYMBOLIC = 1 << 3-1
FF_SCRIPT = 1 << 4-1
FF_NONSYMBOLIC = 1 << 6-1
FF_ITALIC = 1 << 7-1
FF_ALLCAP = 1 << 17-1
FF_SMALLCAP = 1 << 18-1
FF_FORCEBOLD = 1 << 19-1
class TTFontFace(TTFontFile, pdfmetrics.TypeFace):
"""TrueType typeface.
Conceptually similar to a single byte typeface, but the glyphs are
identified by UCS character codes instead of glyph names."""
def __init__(self, filename, validate=0):
"Loads a TrueType font from filename."
pdfmetrics.TypeFace.__init__(self, None)
TTFontFile.__init__(self, filename, validate=validate)
def getCharWidth(self, code):
"Returns the width of character U+<code>"
return self.charWidths.get(code, self.defaultWidth)
def addSubsetObjects(self, doc, fontname, subset):
"""Generate a TrueType font subset and add it to the PDF document.
Returns a PDFReference to the new FontDescriptor object."""
fontFile = pdfdoc.PDFStream()
fontFile.content = self.makeSubset(subset)
fontFile.dictionary['Length1'] = len(fontFile.content)
if doc.compression:
fontFile.filters = [pdfdoc.PDFZCompress]
fontFileRef = doc.Reference(fontFile, 'fontFile:%s(%s)' % (self.filename, fontname))
flags = self.flags & ~ FF_NONSYMBOLIC
flags = flags | FF_SYMBOLIC
fontDescriptor = pdfdoc.PDFDictionary({
'Type': '/FontDescriptor',
'Ascent': self.ascent,
'CapHeight': self.capHeight,
'Descent': self.descent,
'Flags': flags,
'FontBBox': pdfdoc.PDFArray(self.bbox),
'FontName': pdfdoc.PDFName(fontname),
'ItalicAngle': self.italicAngle,
'StemV': self.stemV,
'FontFile2': fontFileRef,
})
return doc.Reference(fontDescriptor, 'fontDescriptor:' + fontname)
class TTEncoding:
"""Encoding for TrueType fonts (always UTF-8).
TTEncoding does not directly participate in PDF object creation, since
we need a number of different 8-bit encodings for every generated font
subset. TTFont itself cares about that."""
def __init__(self):
self.name = "UTF-8"
class TTFont:
"""Represents a TrueType font.
Its encoding is always UTF-8.
Note: you cannot use the same TTFont object for different documents
at the same time.
Example of usage:
font = ttfonts.TTFont('PostScriptFontName', '/path/to/font.ttf')
pdfmetrics.registerFont(font)
canvas.setFont('PostScriptFontName', size)
canvas.drawString(x, y, "Some text encoded in UTF-8")
"""
class State:
def __init__(self):
self.assignments = {}
self.nextCode = 0
self.subsets = []
self.internalName = None
self.frozen = 0
def __init__(self, name, filename, validate=0):
"""Loads a TrueType font from filename.
If validate is set to a false value, checksum validation is skipped. This
can save time, especially if the font is large.
"""
self.fontName = name
self.face = TTFontFace(filename, validate=validate)
self.encoding = TTEncoding()
self._multiByte = 1 # We want our own stringwidth
self._dynamicFont = 1 # We want dynamic subsetting
self.state = {}
def stringWidth(self, text, size):
"Calculate text width"
width = self.face.getCharWidth
w = 0
for code in parse_utf8(text):
w = w + width(code)
return 0.001 * w * size
def splitString(self, text, doc):
"""Splits text into a number of chunks, each of which belongs to a
single subset. Returns a list of tuples (subset, string). Use subset
numbers with getSubsetInternalName. Doc is needed for distinguishing
subsets when building different documents at the same time."""
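# Illustrative sketch (not from the original source): with a fresh state,
# splitString(u"Hi", doc) assigns the codes 0x48 and 0x69 to subset 0 at
# positions 0 and 1, so the method returns [(0, '\x00\x01')].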
try: state = self.state[doc]
except KeyError: state = self.state[doc] = TTFont.State()
curSet = -1
cur = []
results = []
for code in parse_utf8(text):
if state.assignments.has_key(code):
n = state.assignments[code]
else:
if state.frozen:
raise pdfdoc.PDFError, "Font %s is already frozen, cannot add new character U+%04X" % (self.fontName, code)
n = state.nextCode
state.nextCode = state.nextCode + 1
state.assignments[code] = n
if (n & 0xFF) == 0:
state.subsets.append([])
state.subsets[n >> 8].append(code)
if (n >> 8) != curSet:
if cur:
results.append((curSet, string.join(map(chr, cur), "")))
curSet = (n >> 8)
cur = []
cur.append(n & 0xFF)
if cur:
results.append((curSet, string.join(map(chr, cur), "")))
return results
def getSubsetInternalName(self, subset, doc):
"""Returns the name of a PDF Font object corresponding to a given
subset of this dynamic font. Use this function instead of
PDFDocument.getInternalFontName."""
try: state = self.state[doc]
except KeyError: state = self.state[doc] = TTFont.State()
if subset < 0 or subset >= len(state.subsets):
raise IndexError, 'Subset %d does not exist in font %s' % (subset, self.fontName)
if state.internalName is None:
state.internalName = 'F%d' % (len(doc.fontMapping) + 1)
doc.fontMapping[self.fontName] = '/' + state.internalName
doc.delayedFonts.append(self)
return '/%s+%d' % (state.internalName, subset)
def addObjects(self, doc):
"""Makes one or more PDF objects to be added to the document. The
caller supplies the internal name to be used (typically F1, F2, ... in
sequence).
This method creates a number of Font and FontDescriptor objects. Every
FontDescriptor is a (no more than) 256 character subset of the original
TrueType font."""
try: state = self.state[doc]
except KeyError: state = self.state[doc] = TTFont.State()
state.frozen = 1
for n in range(len(state.subsets)):
subset = state.subsets[n]
internalName = self.getSubsetInternalName(n, doc)[1:]
baseFontName = "SUBSET+%s+%d" % (self.face.name, n)
pdfFont = pdfdoc.PDFTrueTypeFont()
pdfFont.__Comment__ = 'Font %s subset %d' % (self.fontName, n)
pdfFont.Name = internalName
pdfFont.BaseFont = baseFontName
pdfFont.FirstChar = 0
pdfFont.LastChar = len(subset) - 1
widths = map(self.face.getCharWidth, subset)
pdfFont.Widths = pdfdoc.PDFArray(widths)
cmapStream = pdfdoc.PDFStream()
cmapStream.content = makeToUnicodeCMap(baseFontName, subset)
if doc.compression:
cmapStream.filters = [pdfdoc.PDFZCompress]
pdfFont.ToUnicode = doc.Reference(cmapStream, 'toUnicodeCMap:' + baseFontName)
pdfFont.FontDescriptor = self.face.addSubsetObjects(doc, baseFontName, subset)
# link it in
ref = doc.Reference(pdfFont, internalName)
fontDict = doc.idToObject['BasicFonts'].dict
fontDict[internalName] = pdfFont
del self.state[doc]
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import logging
import time
import redis
import redis.exceptions
import MySQLdb
import config
import data
import exceptions
import log
class Db(object):
db_mem = None
db_mem_posts = None
db_disk_posts = None
db_cursor = None
retries = 360
retry_wait = 10
cmd_retries = 10
cmd_retry_wait = 10
def __init__(self):
c = config.Config()
self.config = c.cfg
self.log = logging.getLogger('db')
self.dir_root = self.config.get('trends', 'root')
def setup(self):
"""
Setup the connection to Redis DB and to MySQL DB.
"""
self.setup_redis()
self.setup_mysql_loop()
# Get marker to know if a post id is in Redis or MySQL.
self.posts_tid = int(self.get('posts_tid'))
def setup_redis(self):
"""Connections to Redis."""
host = self.config.get('redis', 'host')
port = self.config.getint('redis', 'port')
self.db_mem = redis.Redis(host=host, port=port, db=0)
self.db_mem_posts = redis.Redis(host=host, port=port, db=1)
def setup_mysql_loop(self):
"""Setup connection to Redis until it succeeds"""
retry = 0
while retry < self.retries:
try:
self.setup_mysql()
return
except exceptions.DbError:
if retry < self.retries:
time.sleep(self.retry_wait)
retry += 1
self.log.error(
'%d retries to connect to MySQL failed', self.retries)
raise exceptions.DbError()
def setup_mysql(self):
"""Setup connections to MySQL"""
user = self.config.get('mysql', 'user')
password = self.config.get('mysql', 'password')
db = self.config.get('mysql', 'db')
host = self.config.get('mysql', 'host')
try:
self.db_disk_posts = MySQLdb.connect(host=host,
user=user, passwd=password, db=db,
use_unicode=True, charset='utf8')
self.db_cursor = self.db_disk_posts.cursor()
except MySQLdb.Error:
self.log.error('Problem to connect to MySQL host %s', host)
raise exceptions.DbError()
def redis_cmd(self, cmd, *args):
"""Redis command to DB index 0"""
return self.redis_command(0, cmd, *args)
def redis_cmd_db_1(self, cmd, *args):
"""Redis command to DB index 1"""
return self.redis_command(1, cmd, *args)
def redis_command(self, db, cmd, *args):
"""Command to Redis.
Try cmd_retries times.
"""
if db == 0:
dbr = self.db_mem
else:
dbr = self.db_mem_posts
retry = 0
while retry < self.cmd_retries:
try:
return getattr(dbr, cmd)(*args)
except redis.exceptions.RedisError:
self.log.error('Redis cmd %s error', cmd)
retry += 1
if retry <= self.cmd_retries:
time.sleep(self.cmd_retry_wait)
except AttributeError:
self.log.error('Redis cmd %s does not exist', cmd)
raise exceptions.DbError()
raise exceptions.DbError()
def get(self, key, db=0):
if db == 0:
return self.redis_cmd('get', key)
else:
return self.redis_cmd_db_1('get', key)
def set(self, key, value, db=0):
if db == 0:
return self.redis_cmd('set', key, value)
else:
return self.redis_cmd_db_1('set', key, value)
def delete(self, key):
return self.redis_cmd('delete', key)
def exists(self, key):
return self.redis_cmd('exists', key)
def incr(self, key):
return self.redis_cmd('incr', key)
def rpush(self, key, value):
return self.redis_cmd('rpush', key, value)
def lrange(self, key, start, stop):
return self.redis_cmd('lrange', key, start, stop)
def lset(self, key, index, value):
return self.redis_cmd('lset', key, index, value)
def lindex(self, key, index):
return self.redis_cmd('lindex', key, index)
def mysql_command(self, cmd, sql, writer, commit, *args):
"""Command to MySQL.
Try cmd_retries times."""
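# Semantics (descriptive note): writer=True executes a write; commit=True also
# commits immediately, otherwise the caller batches commits via sql_commit().
# Non-writer commands return cursor.fetchall(). On OperationalError the
# connection is re-established and the attempt counter restarts.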
retry = 0
while retry < self.cmd_retries:
try:
r = getattr(self.db_cursor, cmd)(sql, args)
if writer:
if commit:
self.db_disk_posts.commit()
return r
else:
return self.db_cursor.fetchall()
except (MySQLdb.OperationalError, MySQLdb.InternalError):
self.log.error('MySQL cmd %s DB error', cmd)
# reconnect
self.setup_mysql_loop()
retry = 0
except MySQLdb.Error:
self.log.error('MySQL cmd %s sql %s failed', cmd, sql)
retry += 1
if retry <= self.cmd_retries:
time.sleep(self.cmd_retry_wait)
except AttributeError:
self.log.error('MySQL cmd %s does not exist', cmd)
raise exceptions.DbError()
raise exceptions.DbError()
def sql_read(self, sql, *args):
"""Read command to MySQL."""
return self.mysql_command('execute', sql, False, False, *args)
def sql_write(self, sql, *args):
"""Write command to MySQL."""
return self.mysql_command('execute', sql, True, True, *args)
def sql_write_no_commit(self, sql, *args):
"""Write command to MySQL but no commit."""
return self.mysql_command('execute', sql, True, False, *args)
def sql_commit(self):
"""Commit changes to disk"""
self.db_disk_posts.commit()
def set_post(self, post_id, value):
"""Add/Update post value in Redis or MySQL based on posts id marker...
"""
if post_id >= self.posts_tid:
self.set('post:%d' % (post_id,), value, db=1)
else:
sql = 'insert into tp_post(post_id, post) values(%s, %s)'\
'on duplicate key update post=%s'
self.sql_write(sql, post_id, value, value)
def add_post(self, post_id, value):
"""Add post in MySQL
"""
sql = 'insert into tp_post(post_id, post) values(%s, %s)'
self.sql_write(sql, post_id, value)
def get_post(self, post_id):
"""Get post value from Redis or MySQL based on posts id marker...
"""
if post_id >= self.posts_tid:
r = self.get('post:%d' % (post_id,), db=1)
else:
try:
sql = 'select post from tp_post where post_id=%s'
r = self.sql_read(sql, post_id)
except exceptions.DbError:
r = None
return r
def set_person_score(self, post_id, person_id, score):
"""Set the person's sentiment score based on the tweet
"""
sql = 'insert into tp_person_score(post_id, person_id, score) values(%s, %s, %s)'
self.sql_write(sql, post_id, person_id, str(score))
def get_persons(self):
"""
Get list of persons from db
"""
names = self.redis_cmd('lrange', 'persons', 0, -1)
persons = []
for n in names:
s = n.split(':')
person = {}
person['id'] = int(s[0])
person['first_name'] = s[1]
person['name'] = s[2]
person['nickname'] = s[3]
person['group'] = int(s[4])
person['words'] = json.loads(s[5])
pc = self.lindex('person:%d:posts_count' % int(s[0]), -1)
posts_count = int((pc if pc else 0))
person['posts_count'] = (posts_count if posts_count > 0 else 0)
rels = self.lindex('person:%d:rel' % int(s[0]), -1)
person['rel'] = json.loads((rels if rels else '{}'))
sentiment = self.lindex('person:%d:sentiment' % int(s[0]), -1)
person['sentiment'] = float((sentiment if sentiment else 0))
sentiment_avg = self.get('person:%d:sentiment_avg' % int(s[0]))
person['sentiment_avg'] = float((sentiment_avg if sentiment_avg else 0.0))
sentiment_total = self.get('person:%d:sentiment_total_count' % int(s[0]))
person['sentiment_total_count'] = int((sentiment_total if sentiment_total else 0))
persons.append(person)
return persons
def set_persons(self):
"""
Set list of persons in db
"""
key = 'persons'
self.redis_cmd('delete', key)
with open('%s/names.txt' % (self.dir_root), 'r') as f:
for line in f:
self.redis_cmd('rpush', key, line.rstrip('\n'))
def iter_posts(self):
post_id_start = 108673
post_id_end = 8561087
last_id = post_id_start
while True:
sql = 'select post_id, post from tp_post'\
' where post_id > %s and post_id <= %s order by post_id'\
' limit 1000'
rows = self.sql_read(sql, last_id, post_id_end)
if not rows:
break
last_id = rows[-1][0]
r = []
for row in rows:
d = data.parse_post(row[1])
d['post_id'] = row[0]
r.append(d)
yield r
def get_person_ids_from_post_id(self, post_id):
sql = 'select person_id from tp_person_post where post_id = %s'
rows = self.sql_read(sql, post_id)
return [row[0] for row in rows]
|
|
# -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import unittest
from mongoengine import *
from mongoengine.connection import get_db
__all__ = ("GeoFieldTest", )
@unittest.skip("geo fields not implemented")
class GeoFieldTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
def _test_for_expected_error(self, Cls, loc, expected):
try:
Cls(loc=loc).validate()
self.fail()
except ValidationError, e:
self.assertEqual(expected, e.to_dict()['loc'])
def test_geopoint_validation(self):
class Location(Document):
loc = GeoPointField()
invalid_coords = [{"x": 1, "y": 2}, 5, "a"]
expected = 'GeoPointField can only accept tuples or lists of (x, y)'
for coord in invalid_coords:
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[], [1], [1, 2, 3]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[{}, {}], ("a", "b")]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
def test_point_validation(self):
class Location(Document):
loc = PointField()
invalid_coords = {"x": 1, "y": 2}
expected = 'PointField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": []}
expected = 'PointField type must be "Point"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "Point", "coordinates": [1, 2, 3]}
expected = "Value ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "PointField can only accept lists of [x, y]"
for coord in invalid_coords:
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[], [1], [1, 2, 3]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[{}, {}], ("a", "b")]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
Location(loc=[1, 2]).validate()
def test_linestring_validation(self):
class Location(Document):
loc = LineStringField()
invalid_coords = {"x": 1, "y": 2}
expected = 'LineStringField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'LineStringField type must be "LineString"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "LineString", "coordinates": [[1, 2, 3]]}
expected = "Invalid LineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "Invalid LineString must contain at least one valid point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[1]]
expected = "Invalid LineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0])
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[1, 2, 3]]
expected = "Invalid LineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0])
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
for coord in invalid_coords:
expected = "Invalid LineString:\nBoth values (%s) in point must be float or int" % repr(coord[0])
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[1, 2], [3, 4], [5, 6], [1,2]]).validate()
def test_polygon_validation(self):
class Location(Document):
loc = PolygonField()
invalid_coords = {"x": 1, "y": 2}
expected = 'PolygonField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'PolygonField type must be "Polygon"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "Polygon", "coordinates": [[[1, 2, 3]]]}
expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[5, "a"]]]
expected = "Invalid Polygon:\nBoth values ([5, 'a']) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[]]]
expected = "Invalid Polygon must contain at least one valid linestring"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2, 3]]]
expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
expected = "Invalid Polygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2], [3, 4]]]
expected = "Invalid Polygon:\nLineStrings must start and end at the same point"
self._test_for_expected_error(Location, invalid_coords, expected)
Location(loc=[[[1, 2], [3, 4], [5, 6], [1, 2]]]).validate()
def test_indexes_geopoint(self):
"""Ensure that indexes are created automatically for GeoPointFields.
"""
class Event(Document):
title = StringField()
location = GeoPointField()
geo_indicies = Event._geo_indices()
self.assertEqual(geo_indicies, [{'fields': [('location', '2d')]}])
def test_geopoint_embedded_indexes(self):
"""Ensure that indexes are created automatically for GeoPointFields on
embedded documents.
"""
class Venue(EmbeddedDocument):
location = GeoPointField()
name = StringField()
class Event(Document):
title = StringField()
venue = EmbeddedDocumentField(Venue)
geo_indicies = Event._geo_indices()
self.assertEqual(geo_indicies, [{'fields': [('venue.location', '2d')]}])
def test_indexes_2dsphere(self):
"""Ensure that indexes are created automatically for GeoPointFields.
"""
class Event(Document):
title = StringField()
point = PointField()
line = LineStringField()
polygon = PolygonField()
geo_indicies = Event._geo_indices()
self.assertTrue({'fields': [('line', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('polygon', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('point', '2dsphere')]} in geo_indicies)
def test_indexes_2dsphere_embedded(self):
"""Ensure that indexes are created automatically for GeoPointFields.
"""
class Venue(EmbeddedDocument):
name = StringField()
point = PointField()
line = LineStringField()
polygon = PolygonField()
class Event(Document):
title = StringField()
venue = EmbeddedDocumentField(Venue)
geo_indicies = Event._geo_indices()
self.assertTrue({'fields': [('venue.line', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('venue.polygon', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('venue.point', '2dsphere')]} in geo_indicies)
def test_geo_indexes_recursion(self):
class Location(Document):
name = StringField()
location = GeoPointField()
class Parent(Document):
name = StringField()
location = ReferenceField(Location)
Location.drop_collection()
Parent.drop_collection()
list(Parent.objects)
collection = Parent._get_collection()
info = collection.index_information()
self.assertFalse('location_2d' in info)
self.assertEqual(len(Parent._geo_indices()), 0)
self.assertEqual(len(Location._geo_indices()), 1)
def test_geo_indexes_auto_index(self):
# Test just listing the fields
class Log(Document):
location = PointField(auto_index=False)
datetime = DateTimeField()
meta = {
'indexes': [[("location", "2dsphere"), ("datetime", 1)]]
}
self.assertEqual([], Log._geo_indices())
Log.drop_collection()
Log.ensure_indexes()
info = Log._get_collection().index_information()
self.assertEqual(info["location_2dsphere_datetime_1"]["key"],
[('location', '2dsphere'), ('datetime', 1)])
# Test listing explicitly
class Log(Document):
location = PointField(auto_index=False)
datetime = DateTimeField()
meta = {
'indexes': [
{'fields': [("location", "2dsphere"), ("datetime", 1)]}
]
}
self.assertEqual([], Log._geo_indices())
Log.drop_collection()
Log.ensure_indexes()
info = Log._get_collection().index_information()
self.assertEqual(info["location_2dsphere_datetime_1"]["key"],
[('location', '2dsphere'), ('datetime', 1)])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Backward differentiation formula (BDF) solver."""
import collections
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
MAX_ORDER = 5
ORDERS = np.arange(0, MAX_ORDER + 1)
RECIPROCAL_SUMS = np.concatenate([[np.nan], np.cumsum(1. / ORDERS[1:])])
def error_ratio(backward_difference, error_coefficient, tol):
"""Computes the ratio of the error in the computed state to the tolerance."""
tol_cast = tf.cast(tol, backward_difference.dtype)
error_ratio_ = tf.norm(error_coefficient * backward_difference / tol_cast)
return tf.cast(error_ratio_, tf.abs(backward_difference).dtype)
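# (Note on error_ratio above: callers typically accept a step when this ratio
# is below one, i.e. the weighted error estimate is within tolerance.)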
def first_step_size(
atol,
first_order_error_coefficient,
initial_state_vec,
initial_time,
ode_fn_vec,
rtol,
safety_factor,
epsilon=1e-12,
max_step_size=1.,
min_step_size=1e-12,
):
"""Selects the first step size to use."""
next_time = initial_time + epsilon
first_derivative = ode_fn_vec(initial_time, initial_state_vec)
state_dtype = initial_state_vec.dtype
next_state_vec = initial_state_vec + first_derivative * epsilon
second_derivative = (ode_fn_vec(next_time, next_state_vec) -
first_derivative) / epsilon
tol = tf.cast(atol + rtol * tf.abs(initial_state_vec), state_dtype)
# Local truncation error of an order one step is
# `err(step_size) = first_order_error_coefficient * second_derivative
# * step_size**2`.
# Choose the largest `step_size` such that `norm(err(step_size) / tol) <= 1`.
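# Since `err` grows like `step_size**2`, that bound gives
# `step_size <= norm(first_order_error_coefficient * second_derivative / tol) ** -0.5`,
# which is the `rsqrt` used below (sketch of the reasoning).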
norm = tf.norm(first_order_error_coefficient * second_derivative / tol)
step_size = tf.cast(tf.math.rsqrt(norm), tf.abs(initial_state_vec).dtype)
return tf.clip_by_value(safety_factor * step_size, min_step_size,
max_step_size)
def interpolate_backward_differences(backward_differences, order,
step_size_ratio):
"""Updates backward differences when a change in the step size occurs."""
state_dtype = backward_differences.dtype
interpolation_matrix_ = interpolation_matrix(state_dtype, order,
step_size_ratio)
interpolation_matrix_unit_step_size_ratio = interpolation_matrix(
state_dtype, order, 1.)
interpolated_backward_differences_orders_one_to_five = tf.matmul(
interpolation_matrix_unit_step_size_ratio,
tf.matmul(interpolation_matrix_, backward_differences[1:MAX_ORDER + 1]))
interpolated_backward_differences = tf.concat([
tf.gather(backward_differences, [0]),
interpolated_backward_differences_orders_one_to_five,
ps.zeros(
ps.stack([2, ps.shape(backward_differences)[1]]), dtype=state_dtype),
], 0)
return interpolated_backward_differences
def interpolation_matrix(dtype, order, step_size_ratio):
"""Creates the matrix used to interpolate backward differences."""
orders = tf.cast(tf.range(1, MAX_ORDER + 1), dtype=dtype)
i = orders[:, tf.newaxis]
j = orders[tf.newaxis, :]
# Matrix whose (i, j)-th entry (`1 <= i, j <= order`) is
# `1/j! (0 - i * step_size_ratio) * ... * ((j-1) - i * step_size_ratio)`.
step_size_ratio_cast = tf.cast(step_size_ratio, dtype)
full_interpolation_matrix = tf.math.cumprod(
((j - 1) - i * step_size_ratio_cast) / j, axis=1)
zeros_matrix = tf.zeros_like(full_interpolation_matrix)
interpolation_matrix_ = tf1.where(
tf.range(1, MAX_ORDER + 1) <= order,
tf.transpose(
tf1.where(
tf.range(1, MAX_ORDER + 1) <= order,
tf.transpose(full_interpolation_matrix), zeros_matrix)),
zeros_matrix)
return interpolation_matrix_
def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,
order, step_size, time, tol, unitary, upper):
"""Runs Newton's method to solve the BDF equation."""
initial_guess = tf.reduce_sum(
tf1.where(
tf.range(MAX_ORDER + 1) <= order,
backward_differences[:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),
axis=0)
np_dtype = dtype_util.as_numpy_dtype(backward_differences.dtype)
rhs_constant_term = newton_coefficient * tf.reduce_sum(
tf1.where(
tf.range(1, MAX_ORDER + 1) <= order,
RECIPROCAL_SUMS[1:, np.newaxis].astype(np_dtype) *
backward_differences[1:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),
axis=0)
next_time = time + step_size
step_size_cast = tf.cast(step_size, backward_differences.dtype)
real_dtype = tf.abs(backward_differences).dtype
def newton_body(iterand):
"""Performs one iteration of Newton's method."""
next_backward_difference = iterand.next_backward_difference
next_state_vec = iterand.next_state_vec
rhs = newton_coefficient * step_size_cast * ode_fn_vec(
next_time,
next_state_vec) - rhs_constant_term - next_backward_difference
delta = tf.squeeze(
tf.linalg.triangular_solve(
upper,
tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),
lower=False))
num_iters = iterand.num_iters + 1
next_backward_difference += delta
next_state_vec += delta
delta_norm = tf.cast(tf.norm(delta), real_dtype)
lipschitz_const = delta_norm / iterand.prev_delta_norm
# Stop if method has converged.
approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm
close_to_sol = approx_dist_to_sol < tol
delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))
converged = close_to_sol | delta_norm_is_zero
finished = converged
# Stop if any of the following conditions are met:
# (A) We have hit the maximum number of iterations.
# (B) The method is converging too slowly.
# (C) The method is not expected to converge.
too_slow = lipschitz_const > 1.
finished = finished | too_slow
if max_num_iters is not None:
too_many_iters = tf.equal(num_iters, max_num_iters)
num_iters_left = max_num_iters - num_iters
num_iters_left_cast = tf.cast(num_iters_left, real_dtype)
wont_converge = (
approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)
finished = finished | too_many_iters | wont_converge
return [
_NewtonIterand(
converged=converged,
finished=finished,
next_backward_difference=next_backward_difference,
next_state_vec=next_state_vec,
num_iters=num_iters,
prev_delta_norm=delta_norm)
]
iterand = _NewtonIterand(
converged=False,
finished=False,
next_backward_difference=tf.zeros_like(initial_guess),
next_state_vec=tf.identity(initial_guess),
num_iters=0,
prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))
[iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),
newton_body, [iterand])
return (iterand.converged, iterand.next_backward_difference,
iterand.next_state_vec, iterand.num_iters)
_NewtonIterand = collections.namedtuple('NewtonIterand', [
'converged',
'finished',
'next_backward_difference',
'next_state_vec',
'num_iters',
'prev_delta_norm',
])
def newton_qr(jacobian_mat, newton_coefficient, step_size):
"""QR factorizes the matrix used in each iteration of Newton's method."""
identity = tf.eye(ps.shape(jacobian_mat)[0], dtype=jacobian_mat.dtype)
step_size_cast = tf.cast(step_size, jacobian_mat.dtype)
newton_matrix = (
identity - step_size_cast * newton_coefficient * jacobian_mat)
factorization = tf.linalg.qr(newton_matrix)
return factorization.q, factorization.r
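# (Note on newton_qr above: Q and R factor the Newton iteration matrix
# `I - step_size * newton_coefficient * J`, so each newton() iteration only
# needs a matmul with Q^T followed by a triangular solve against R.)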
def update_backward_differences(backward_differences, next_backward_difference,
next_state_vec, order):
"""Returns the backward differences for the next time."""
backward_differences_array = tf.TensorArray(
backward_differences.dtype,
size=MAX_ORDER + 3,
clear_after_read=False,
element_shape=next_backward_difference.shape).unstack(
backward_differences)
new_backward_differences_array = tf.TensorArray(
backward_differences.dtype,
size=MAX_ORDER + 3,
clear_after_read=False,
element_shape=next_backward_difference.shape)
new_backward_differences_array = new_backward_differences_array.write(
order + 2,
next_backward_difference - backward_differences_array.read(order + 1))
new_backward_differences_array = new_backward_differences_array.write(
order + 1, next_backward_difference)
def body(k, new_backward_differences_array_):
new_backward_differences_array_k = (
new_backward_differences_array_.read(k + 1) +
backward_differences_array.read(k))
new_backward_differences_array_ = new_backward_differences_array_.write(
k, new_backward_differences_array_k)
return k - 1, new_backward_differences_array_
_, new_backward_differences_array = tf.while_loop(
lambda k, new_backward_differences_array: k > 0, body,
[order, new_backward_differences_array])
new_backward_differences_array = new_backward_differences_array.write(
0, next_state_vec)
new_backward_differences = new_backward_differences_array.stack()
tensorshape_util.set_shape(new_backward_differences,
tf.TensorShape([MAX_ORDER + 3, None]))
return new_backward_differences
|
|
# Copyright (C) 2012 Apple. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
LLDB Support for WebKit Types
Add the following to your .lldbinit file to add WebKit Type summaries in LLDB and Xcode:
command script import {Path to WebKit Root}/Tools/lldb/lldb_webkit.py
"""
import lldb
def __lldb_init_module(debugger, dict):
debugger.HandleCommand('type summary add --expand -F lldb_webkit.WTFString_SummaryProvider WTF::String')
debugger.HandleCommand('type summary add --expand -F lldb_webkit.WTFStringImpl_SummaryProvider WTF::StringImpl')
debugger.HandleCommand('type summary add --expand -F lldb_webkit.WTFAtomicString_SummaryProvider WTF::AtomicString')
debugger.HandleCommand('type summary add --expand -F lldb_webkit.WTFVector_SummaryProvider -x "WTF::Vector<.+>$"')
debugger.HandleCommand('type summary add --expand -F lldb_webkit.WTFHashTable_SummaryProvider -x "WTF::HashTable<.+>$"')
debugger.HandleCommand('type synthetic add -x "WTF::Vector<.+>$" --python-class lldb_webkit.WTFVectorProvider')
debugger.HandleCommand('type synthetic add -x "WTF::HashTable<.+>$" --python-class lldb_webkit.WTFHashTableProvider')
debugger.HandleCommand('type summary add -F lldb_webkit.WebCoreLayoutUnit_SummaryProvider WebCore::LayoutUnit')
debugger.HandleCommand('type summary add -F lldb_webkit.WebCoreLayoutSize_SummaryProvider WebCore::LayoutSize')
debugger.HandleCommand('type summary add -F lldb_webkit.WebCoreLayoutPoint_SummaryProvider WebCore::LayoutPoint')
def WTFString_SummaryProvider(valobj, dict):
provider = WTFStringProvider(valobj, dict)
return "{ length = %d, contents = '%s' }" % (provider.get_length(), provider.to_string())
def WTFStringImpl_SummaryProvider(valobj, dict):
provider = WTFStringImplProvider(valobj, dict)
return "{ length = %d, is8bit = %d, contents = '%s' }" % (provider.get_length(), provider.is_8bit(), provider.to_string())
def WTFAtomicString_SummaryProvider(valobj, dict):
return WTFString_SummaryProvider(valobj.GetChildMemberWithName('m_string'), dict)
def WTFVector_SummaryProvider(valobj, dict):
provider = WTFVectorProvider(valobj, dict)
return "{ size = %d, capacity = %d }" % (provider.size, provider.capacity)
def WTFHashTable_SummaryProvider(valobj, dict):
provider = WTFHashTableProvider(valobj, dict)
return "{ tableSize = %d, keyCount = %d }" % (provider.tableSize(), provider.keyCount())
def WebCoreLayoutUnit_SummaryProvider(valobj, dict):
provider = WebCoreLayoutUnitProvider(valobj, dict)
return "{ %s }" % provider.to_string()
def WebCoreLayoutSize_SummaryProvider(valobj, dict):
provider = WebCoreLayoutSizeProvider(valobj, dict)
return "{ width = %s, height = %s }" % (provider.get_width(), provider.get_height())
def WebCoreLayoutPoint_SummaryProvider(valobj, dict):
provider = WebCoreLayoutPointProvider(valobj, dict)
return "{ x = %s, y = %s }" % (provider.get_x(), provider.get_y())
# FIXME: Provide support for the following types:
# def WTFCString_SummaryProvider(valobj, dict):
# def WebCoreKURLGooglePrivate_SummaryProvider(valobj, dict):
# def WebCoreQualifiedName_SummaryProvider(valobj, dict):
# def JSCIdentifier_SummaryProvider(valobj, dict):
# def JSCJSString_SummaryProvider(valobj, dict):
def guess_string_length(valobj, error):
if not valobj.GetValue():
return 0
for i in xrange(0, 2048):
if valobj.GetPointeeData(i, 1).GetUnsignedInt16(error, 0) == 0:
return i
return 256
def ustring_to_string(valobj, error, length=None):
if length is None:
length = guess_string_length(valobj, error)
else:
length = int(length)
out_string = u""
for i in xrange(0, length):
char_value = valobj.GetPointeeData(i, 1).GetUnsignedInt16(error, 0)
out_string = out_string + unichr(char_value)
return out_string.encode('utf-8')
def lstring_to_string(valobj, error, length=None):
if length is None:
length = guess_string_length(valobj, error)
else:
length = int(length)
out_string = u""
for i in xrange(0, length):
char_value = valobj.GetPointeeData(i, 1).GetUnsignedInt8(error, 0)
out_string = out_string + unichr(char_value)
return out_string.encode('utf-8')
class WTFStringImplProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
def get_length(self):
return self.valobj.GetChildMemberWithName('m_length').GetValueAsUnsigned(0)
def get_data8(self):
return self.valobj.GetChildAtIndex(2).GetChildMemberWithName('m_data8')
def get_data16(self):
return self.valobj.GetChildAtIndex(2).GetChildMemberWithName('m_data16')
def to_string(self):
error = lldb.SBError()
if self.is_8bit():
return lstring_to_string(self.get_data8(), error, self.get_length())
return ustring_to_string(self.get_data16(), error, self.get_length())
def is_8bit(self):
# FIXME: find a way to access WTF::StringImpl::s_hashFlag8BitBuffer
return bool(self.valobj.GetChildMemberWithName('m_hashAndFlags').GetValueAsUnsigned(0) \
& 1 << 6)
class WTFStringProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
def stringimpl(self):
impl_ptr = self.valobj.GetChildMemberWithName('m_impl').GetChildMemberWithName('m_ptr')
return WTFStringImplProvider(impl_ptr, dict)
def get_length(self):
impl = self.stringimpl()
if not impl:
return 0
return impl.get_length()
def to_string(self):
impl = self.stringimpl()
if not impl:
return u""
return impl.to_string()
class WebCoreLayoutUnitProvider:
"Print a WebCore::LayoutUnit"
def __init__(self, valobj, dict):
self.valobj = valobj
def to_string(self):
return "%gpx" % (self.valobj.GetChildMemberWithName('m_value').GetValueAsUnsigned(0) / 64.0)
class WebCoreLayoutSizeProvider:
"Print a WebCore::LayoutSize"
def __init__(self, valobj, dict):
self.valobj = valobj
def get_width(self):
return WebCoreLayoutUnitProvider(self.valobj.GetChildMemberWithName('m_width'), dict).to_string()
def get_height(self):
return WebCoreLayoutUnitProvider(self.valobj.GetChildMemberWithName('m_height'), dict).to_string()
class WebCoreLayoutPointProvider:
"Print a WebCore::LayoutPoint"
def __init__(self, valobj, dict):
self.valobj = valobj
def get_x(self):
return WebCoreLayoutUnitProvider(self.valobj.GetChildMemberWithName('m_x'), dict).to_string()
def get_y(self):
return WebCoreLayoutUnitProvider(self.valobj.GetChildMemberWithName('m_y'), dict).to_string()
class WTFVectorProvider:
def __init__(self, valobj, internal_dict):
self.valobj = valobj
self.update()
def num_children(self):
return self.size + 3
def get_child_index(self, name):
if name == "m_size":
return self.size
elif name == "m_capacity":
return self.size + 1
elif name == "m_buffer":
return self.size + 2
else:
return int(name.lstrip('[').rstrip(']'))
def get_child_at_index(self, index):
if index == self.size:
return self.valobj.GetChildMemberWithName("m_size")
elif index == self.size + 1:
return self.valobj.GetChildMemberWithName("m_capacity")
elif index == self.size + 2:
return self.buffer
elif index < self.size:
offset = index * self.data_size
child = self.buffer.CreateChildAtOffset('[' + str(index) + ']', offset, self.data_type)
return child
else:
return None
def update(self):
self.buffer = self.valobj.GetChildMemberWithName('m_buffer')
self.size = self.valobj.GetChildMemberWithName('m_size').GetValueAsUnsigned(0)
self.capacity = self.buffer.GetChildMemberWithName('m_capacity').GetValueAsUnsigned(0)
self.data_type = self.buffer.GetType().GetPointeeType()
self.data_size = self.data_type.GetByteSize()
def has_children(self):
return True
class WTFHashTableProvider:
def __init__(self, valobj, internal_dict):
self.valobj = valobj
self.update()
def num_children(self):
return self.tableSize() + 5
def get_child_index(self, name):
if name == "m_table":
return self.tableSize()
elif name == "m_tableSize":
return self.tableSize() + 1
elif name == "m_tableSizeMask":
return self.tableSize() + 2
elif name == "m_keyCount":
return self.tableSize() + 3
elif name == "m_deletedCount":
return self.tableSize() + 4
else:
return int(name.lstrip('[').rstrip(']'))
def get_child_at_index(self, index):
if index == self.tableSize():
return self.valobj.GetChildMemberWithName('m_table')
elif index == self.tableSize() + 1:
return self.valobj.GetChildMemberWithName('m_tableSize')
elif index == self.tableSize() + 2:
return self.valobj.GetChildMemberWithName('m_tableSizeMask')
elif index == self.tableSize() + 3:
return self.valobj.GetChildMemberWithName('m_keyCount')
elif index == self.tableSize() + 4:
return self.valobj.GetChildMemberWithName('m_deletedCount')
elif index < self.tableSize():
table = self.valobj.GetChildMemberWithName('m_table')
return table.CreateChildAtOffset('[' + str(index) + ']', index * self.data_size, self.data_type)
else:
return None
def tableSize(self):
return self.valobj.GetChildMemberWithName('m_tableSize').GetValueAsUnsigned(0)
def keyCount(self):
return self.valobj.GetChildMemberWithName('m_keyCount').GetValueAsUnsigned(0)
def update(self):
self.data_type = self.valobj.GetType().GetTemplateArgumentType(0)
self.data_size = self.data_type.GetByteSize()
def has_children(self):
return True
|
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
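# np.rollaxis returns a view, so the in-place operations below modify the
# caller's array (which is what "in place" in the docstring relies on).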
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
|
from collections import defaultdict
from pathlib import Path as p
import rdflib
from rdflib import *
from ilxutils.tools import open_pickle, create_pickle
from ilxutils.interlex_sql import IlxSql
import pickle
import os
from typing import *
class rdfGraph(Graph):
''' Adds needed functions to rdflib.Graph '''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# BNodes must be remembered so the same axiom node can be reused for a given triple
self.axiom_triple_2_bnode = {} # (triple): BNode
def add_annotation(self,
subj: URIRef,
pred: URIRef,
obj: Union[Literal, URIRef],
a_p: URIRef,
a_o: Union[Literal, URIRef],) -> BNode:
""" Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pred: Entity's predicate anchor to be annotated
obj: Entity's object anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is
storing the annotation information.
Axiom Form Example:
[ a owl:Axiom ;
owl:annotatedSource ILX:id ;
owl:annotatedProperty ilxtr:hasWikiDataId ;
owl:annotatedTarget wdt:something ;
rdfs:label "ILX label" ;
skos:altLabel "wikidata label" ] .
"""
bnode = self.axiom_triple_2_bnode.get( (subj, pred, obj) )
# If axiom is not created yet, make one
if not bnode:
a_s = BNode()
self.axiom_triple_2_bnode[(subj, pred, obj)]: BNode = a_s
self.add( (a_s, RDF.type, OWL.Axiom) )
self.add( (a_s, OWL.annotatedSource, subj) )
self.add( (a_s, OWL.annotatedProperty, pred) )
self.add( (a_s, OWL.annotatedTarget, obj) )
# Append to existing axiom
else:
a_s = bnode
self.add( (a_s, a_p, a_o) )
return a_s # In case you have more triples to add
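# Hypothetical usage sketch (identifiers below are made up, not from the data):
# g = rdfGraph()
# axiom = g.add_annotation(
#     URIRef('http://uri.interlex.org/base/ilx_0000001'),  # subject
#     URIRef('http://uri.interlex.org/tgbugs/uris/readable/hasWikiDataId'),
#     URIRef('http://www.wikidata.org/entity/Q42'),
#     RDFS.label, Literal('example annotation value'))
# Calling it again with the same (subj, pred, obj) reuses the existing axiom BNode.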
graph = rdfGraph()
prefixes = {
'hasRole': 'http://purl.obolibrary.org/obo/RO_0000087',
'inheresIn': 'http://purl.obolibrary.org/obo/RO_0000052',
'bearerOf': 'http://purl.obolibrary.org/obo/RO_0000053',
'participatesIn': 'http://purl.obolibrary.org/obo/RO_0000056',
'hasParticipant': 'http://purl.obolibrary.org/obo/RO_0000057',
'adjacentTo': 'http://purl.obolibrary.org/obo/RO_0002220',
'derivesFrom': 'http://purl.obolibrary.org/obo/RO_0001000',
'derivesInto': 'http://purl.obolibrary.org/obo/RO_0001001',
'agentIn': 'http://purl.obolibrary.org/obo/RO_0002217',
'hasAgent': 'http://purl.obolibrary.org/obo/RO_0002218',
'containedIn': 'http://purl.obolibrary.org/obo/RO_0001018',
'contains': 'http://purl.obolibrary.org/obo/RO_0001019',
'locatedIn': 'http://purl.obolibrary.org/obo/RO_0001025',
'locationOf': 'http://purl.obolibrary.org/obo/RO_0001015',
'toward': 'http://purl.obolibrary.org/obo/RO_0002503',
'replacedBy': 'http://purl.obolibrary.org/obo/IAO_0100001',
'hasCurStatus': 'http://purl.obolibrary.org/obo/IAO_0000114',
'definition': 'http://purl.obolibrary.org/obo/IAO_0000115',
'editorNote': 'http://purl.obolibrary.org/obo/IAO_0000116',
'termEditor': 'http://purl.obolibrary.org/obo/IAO_0000117',
'altTerm': 'http://purl.obolibrary.org/obo/IAO_0000118',
'defSource': 'http://purl.obolibrary.org/obo/IAO_0000119',
'termsMerged': 'http://purl.obolibrary.org/obo/IAO_0000227',
'obsReason': 'http://purl.obolibrary.org/obo/IAO_0000231',
'curatorNote': 'http://purl.obolibrary.org/obo/IAO_0000232',
'importedFrom': 'http://purl.obolibrary.org/obo/IAO_0000412',
'partOf': 'http://purl.obolibrary.org/obo/BFO_0000050',
'hasPart': 'http://purl.obolibrary.org/obo/BFO_0000051',
'ILX': 'http://uri.interlex.org/base/ilx_',
'ilx': 'http://uri.interlex.org/base/',
'ilxr': 'http://uri.interlex.org/base/readable/',
'ilxtr': 'http://uri.interlex.org/tgbugs/uris/readable/',
'fobo': 'http://uri.interlex.org/fakeobo/uris/obo/',
'PROTEGE': 'http://protege.stanford.edu/plugins/owl/protege#',
'UBERON': 'http://purl.obolibrary.org/obo/UBERON_',
'ILXREPLACE': 'http://ILXREPLACE.org/',
'FIXME': 'http://FIXME.org/',
'NIFTTL': 'http://ontology.neuinfo.org/NIF/ttl/',
'NIFRET': 'http://ontology.neuinfo.org/NIF/Retired/NIF-Retired.owl#',
'NLXWIKI': 'http://neurolex.org/wiki/',
'dc': 'http://purl.org/dc/elements/1.1/',
'dcterms': 'http://purl.org/dc/terms/',
'nsu': 'http://www.FIXME.org/nsupper#',
'oboInOwl': 'http://www.geneontology.org/formats/oboInOwl#',
'owl': 'http://www.w3.org/2002/07/owl#',
'ro': 'http://www.obofoundry.org/ro/ro.owl#',
'skos': 'http://www.w3.org/2004/02/skos/core#',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'prov': 'http://www.w3.org/ns/prov#',
'NIFRID': 'http://uri.neuinfo.org/nif/nifstd/readable/',
}
for prefix, ns in prefixes.items():
graph.bind(prefix, ns)
ilx_uri_base = 'http://uri.interlex.org/base'
in_sanity_check = {}
DEFINITION = Namespace('http://purl.obolibrary.org/obo/IAO_0000115')
ILXTR = Namespace('http://uri.interlex.org/tgbugs/uris/readable/')
NIFRID = Namespace('http://uri.neuinfo.org/nif/nifstd/readable/')
terms = open_pickle(p.home()/'Dropbox/interlex_backups/ilx_db_terms_backup.pickle')
for row in terms.itertuples():
ilx_uri = '/'.join([ilx_uri_base, row.ilx])
ilx_uri = URIRef(ilx_uri)
in_sanity_check[ilx_uri] = True
if row.type in ['term', 'cde', 'fde', 'pde']:
graph.add((ilx_uri, RDF.type, OWL.Class))
elif row.type == 'annotation':
graph.add((ilx_uri, RDF.type, OWL.AnnotationProperty))
elif row.type == 'relationship':
graph.add((ilx_uri, RDF.type, OWL.ObjectProperty))
else:
graph.add((ilx_uri, RDF.type, OWL.Lost))
print('We have a no-type entity!', row.ilx)
graph.add((ilx_uri, RDFS.label, Literal(row.label)))
graph.add((ilx_uri, URIRef(DEFINITION), Literal(row.definition)))
del terms
print('=== Class-AnnotationProperty-ObjectProperty triples complete ===')
ilx2ex = defaultdict(list)
ex = open_pickle(p.home()/'Dropbox/interlex_backups/ilx_db_ex_backup.pickle')
for row in ex.itertuples():
ilx_uri = '/'.join([ilx_uri_base, row.ilx])
ilx_uri = URIRef(ilx_uri)
if not in_sanity_check.get(ilx_uri):
print('ex', ilx_uri)
graph.add( (ilx_uri, ILXTR.existingId, URIRef(row.iri)) )
ilx2ex[row.ilx].append(row.iri)
del ex
print('=== existingId triples complete ===')
synonyms = open_pickle(p.home() / 'Dropbox/interlex_backups/ilx_db_synonyms_backup.pickle')
for row in synonyms.itertuples():
ilx_uri = '/'.join([ilx_uri_base, row.ilx])
ilx_uri = URIRef(ilx_uri)
if not in_sanity_check.get(ilx_uri):
print('synonyms', ilx_uri)
graph.add( (ilx_uri, NIFRID.synonym, Literal(row.literal)) )
del synonyms
print('=== synonym triples complete ===')
superclasses = open_pickle(p.home() / 'Dropbox/interlex_backups/ilx_db_superclasses_backup.pickle')
for row in superclasses.itertuples():
ilx_uri = '/'.join([ilx_uri_base, row.term_ilx])
ilx_uri = URIRef(ilx_uri)
if not in_sanity_check.get(ilx_uri):
print('superclasses', ilx_uri)
for existing_id in ilx2ex[row.term_ilx]:
id_ = URIRef(f'http://uri.interlex.org/base/{existing_id}')
graph.add( (ilx_uri, RDFS.subClassOf, id_) )
del superclasses
print('=== superclass triples complete ===')
### Data is both huge and not useful
annos = open_pickle(p.home() / 'Dropbox/interlex_backups/ilx_db_annos_backup.pickle')
for row in annos.itertuples():
ilx_uri = '/'.join([ilx_uri_base, row.term_ilx])
ilx_uri = URIRef(ilx_uri)
annotation_ilx_uri = '/'.join([ilx_uri_base, row.annotation_type_ilx])
if not in_sanity_check.get(ilx_uri):
print('annotations', ilx_uri)
prefix = ''.join([w.capitalize() for w in row.annotation_type_label.split()])
graph.bind(prefix, annotation_ilx_uri)
annotation_ilx_uri = URIRef(annotation_ilx_uri)
# TODO: check if row.value is a Literal or a URIRef
graph.add_annotation(ilx_uri, RDF.type, OWL.Class, annotation_ilx_uri, Literal(row.value))
# the AnnotationProperty itself was already typed in the Class/AnnotationProperty/ObjectProperty loop above
graph.add( (ilx_uri, annotation_ilx_uri, Literal(row.value)) )
del annos
print('=== annotation axiom triples complete ===')
relationships = open_pickle(p.home() / 'Dropbox/interlex_backups/ilx_db_relationships_backup.pickle')
for row in relationships.itertuples():
prefix = ''.join([w.capitalize() for w in row.relationship_label.split()])
relationship_ilx_uri = '/'.join([ilx_uri_base, row.relationship_ilx])
graph.bind(prefix, relationship_ilx_uri)
relationship_ilx_uri = URIRef(relationship_ilx_uri)
term1_ilx_uri = '/'.join([ilx_uri_base, row.term1_ilx])
term1_ilx_uri = URIRef(term1_ilx_uri)
if not in_sanity_check.get(term1_ilx_uri): print('relationships', term1_ilx_uri)
term2_ilx_uri = '/'.join([ilx_uri_base, row.term2_ilx])
term2_ilx_uri = URIRef(term2_ilx_uri)
if not in_sanity_check.get(term2_ilx_uri): print('relationships', term2_ilx_uri)
graph.add( (term1_ilx_uri, relationship_ilx_uri, term2_ilx_uri) )
graph.add( (term2_ilx_uri, relationship_ilx_uri, term1_ilx_uri) )
print('=== relationship triples complete ===')
graph.serialize(destination=str(p.home()/'Dropbox/interlex_backups/InterLex.ttl'), format='turtle')
graph.serialize(destination=str(p.home()/'Dropbox/interlex_backups/SciGraph/SciGraph-core/src/test/resources/ontologies/'), format='turtle')
create_pickle(graph, p.home()/'Dropbox/interlex_backups/InterLex.graph.pickle')
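# --- Minimal spot-check sketch (not part of the original pipeline) ---
# Assumes open_pickle is the read counterpart of create_pickle used above and that
# rdflib's RDF/RDFS/OWL/URIRef are already in scope; the ilx identifier used in the
# lookup below is purely hypothetical.
def spot_check_interlex_graph(pickle_path):
    g = open_pickle(pickle_path)
    # count everything the export above typed as owl:Class
    n_classes = sum(1 for _ in g.subjects(RDF.type, OWL.Class))
    print('owl:Class subjects in graph:', n_classes)
    # fetch the label(s) of one arbitrary term
    for label in g.objects(URIRef('http://uri.interlex.org/base/ilx_0101431'), RDFS.label):
        print('label:', label)
# spot_check_interlex_graph(p.home()/'Dropbox/interlex_backups/InterLex.graph.pickle')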
#!/usr/bin/env python
import os, sys, time, datetime, codecs, shutil, subprocess, re, math, base64
from stat import *
from tiapp import *
from xml.dom.minidom import parseString
# mako and simplejson are in support/common
this_dir = os.path.dirname(os.path.abspath(__file__))
common_dir = os.path.join(os.path.dirname(this_dir), "common")
sys.path.append(common_dir)
import mako.template
from mako import runtime
import simplejson
from csspacker import CSSPacker
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store']
ignoreDirs = ['.git','.svn','_svn','CVS']
year = datetime.datetime.now().year
HTML_HEADER = """<!--
WARNING: this is generated code and will be lost if changes are made.
This generated source code is Copyright (c) 2010-%d by Appcelerator, Inc. All Rights Reserved.
-->""" % year
HEADER = """/**
* WARNING: this is generated code and will be lost if changes are made.
* This generated source code is Copyright (c) 2010-%d by Appcelerator, Inc. All Rights Reserved.
*/
""" % year
def compare_versions(version1, version2):
def normalize(v):
v = '.'.join(map((lambda s:re.sub(r'[^\d]+(.*)$','',s)), v.split('.')[:3]))
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
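# Worked examples (comments only): normalize() keeps at most the first three
# dot-separated fields, strips any non-numeric suffix from each field, and drops
# trailing ".0" groups, so:
#   compare_versions('1.8.2', '1.8.10')  -> -1   ([1, 8, 2] < [1, 8, 10])
#   compare_versions('2.0', '2.0.0')     ->  0   (both normalize to [2])
#   compare_versions('3.1rc1', '3.0')    ->  1   ('3.1rc1' normalizes to [3, 1])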
class AppcTemplate(mako.template.Template):
def render(self, *args, **data):
return runtime._render(self, self.callable_, args, data, as_unicode=True)
class Compiler(object):
def __init__(self, project_path, deploytype):
start_time = time.time()
self.minify = deploytype == "production"
self.packages = []
self.project_dependencies = [] # modules that the project uses
self.modules_map = {} # all modules including deps => individual module deps
self.modules_to_cache = [] # all modules to be baked into require.cache()
self.modules_to_load = [] # all modules to be required at load time
self.tiplus_modules_to_load = [] # all modules to be required at load time
# initialize paths
self.sdk_path = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
self.sdk_src_path = os.path.join(self.sdk_path, 'src')
self.themes_path = os.path.join(self.sdk_path, 'themes')
self.ti_package_path = os.path.join(self.sdk_path, 'titanium')
self.modules_path = os.path.abspath(os.path.join(self.sdk_path, '..', '..', '..', '..', 'modules'))
self.project_path = project_path
self.build_path = os.path.join(project_path, 'build', 'mobileweb')
self.resources_path = os.path.join(project_path, 'Resources')
self.i18n_path = os.path.join(project_path, 'i18n')
self.ti_js_file = os.path.join(self.build_path, 'titanium.js')
sdk_version = os.path.basename(os.path.abspath(os.path.join(self.sdk_path, '..')))
print '[INFO] Titanium Mobile Web Compiler v%s' % sdk_version
if not os.path.exists(self.project_path):
print '[ERROR] Invalid project "%s"' % self.project_path
sys.exit(1)
# read the package.json
self.load_package_json()
# register the titanium package
self.packages.append({
'name': self.package_json['name'],
'location': './titanium',
'main': self.package_json['main']
})
# read the tiapp.xml
tiapp_xml = TiAppXML(os.path.join(self.project_path, 'tiapp.xml'), deploytype)
print '[INFO] Compiling Mobile Web project "%s" [%s]' % (tiapp_xml['name'], deploytype)
# create the build directory
if os.path.exists(self.build_path):
shutil.rmtree(self.build_path, True)
try:
os.makedirs(self.build_path)
except:
pass
# copy all of the project's resources to the build directory
self.copy(self.themes_path, os.path.join(self.build_path, 'themes'))
self.copy(self.resources_path, self.build_path, ['android', 'iphone'])
self.copy(os.path.join(self.resources_path, 'mobileweb'), self.build_path, ['apple_startup_images', 'splash'])
self.copy(os.path.join(self.resources_path, 'mobileweb', 'apple_startup_images', 'Default.jpg'), self.build_path)
self.copy(os.path.join(self.resources_path, 'mobileweb', 'apple_startup_images', 'Default-Portrait.jpg'), self.build_path)
self.copy(os.path.join(self.resources_path, 'mobileweb', 'apple_startup_images', 'Default-Landscape.jpg'), self.build_path)
self.copy(self.ti_package_path, os.path.join(self.build_path, 'titanium'))
# scan project for dependencies
self.find_project_dependencies()
# scan all dependencies for distinct list of modules
self.find_modules_to_cache()
self.modules_to_cache.append('Ti/_/image')
self.modules_to_cache.append('Ti/_/include')
if len(tiapp_xml['precache']['requires']):
for req in tiapp_xml['precache']['requires']:
self.modules_to_cache.append('commonjs:' + req)
if len(tiapp_xml['precache']['includes']):
for inc in tiapp_xml['precache']['includes']:
self.modules_to_cache.append('url:' + inc)
# find only the top most modules to be required
areDeps = {}
for module in self.modules_to_cache:
# check if module is a dependent of another module
for m in self.modules_map:
deps = self.modules_map[m]
if module in deps:
areDeps[module] = 1
for module in self.modules_map:
if not module in areDeps:
self.modules_to_load.append(module)
# determine theme
theme = tiapp_xml['mobileweb']['theme']
if not os.path.exists(os.path.join(self.themes_path, theme)):
print '[ERROR] Theme "%s" does not exist' % theme
sys.exit(1)
# check what we need to precache
precache_images = []
if 'Ti/UI/TableViewRow' in self.modules_map:
precache_images.append('/themes/' + theme + '/UI/TableViewRow/child.png')
if len(tiapp_xml['precache']['images']):
for img in tiapp_xml['precache']['images']:
precache_images.append(img)
# detect Ti+ modules
if len(tiapp_xml['modules']):
print '[INFO] Locating Ti+ modules...'
for module in tiapp_xml['modules']:
if module['platform'] in ['', 'mobileweb', 'commonjs']:
is_commonjs = False
if 'version' in module and module['version']:
# search <project dir>/modules/mobileweb/<module>/<version>/
module_dir = os.path.join(self.project_path, 'modules', 'mobileweb', module['id'], module['version'])
if not os.path.exists(module_dir):
# search <project dir>/modules/commonjs/<module>/<version>/
module_dir = os.path.join(self.project_path, 'modules', 'commonjs', module['id'], module['version'])
if os.path.exists(module_dir):
is_commonjs = True
else:
# search <global module dir>/<module>/<version>/
module_dir = os.path.join(self.modules_path, 'mobileweb', module['id'], module['version'])
if not os.path.exists(module_dir):
# search <global commonjs dir>/<module>/<version>/
module_dir = os.path.join(self.modules_path, 'commonjs', module['id'], module['version'])
if os.path.exists(module_dir):
is_commonjs = True
else:
print '[ERROR] Unable to find Ti+ module "%s", v%s' % (module['id'], module['version'])
sys.exit(1)
else:
# no version number, gotta do it the hard way
# search <project dir>/modules/mobileweb/<module>/
module_dir = self.locate_module(os.path.join(self.project_path, 'modules', 'mobileweb', module['id']))
if module_dir is None:
# search <project dir>/modules/commonjs/<module>/<version>/
module_dir = self.locate_module(os.path.join(self.project_path, 'modules', 'commonjs', module['id']))
if module_dir is not None:
is_commonjs = True
else:
# search <global module dir>/<module>/<version>/
module_dir = self.locate_module(os.path.join(self.modules_path, 'mobileweb', module['id']))
if module_dir is None:
# search <global commonjs dir>/<module>/<version>/
module_dir = self.locate_module(os.path.join(self.modules_path, 'commonjs', module['id']))
if module_dir is not None:
is_commonjs = True
else:
print '[ERROR] Unable to find Ti+ module "%s"' % module['id']
sys.exit(1)
module_package_json_file = os.path.join(module_dir, 'package.json')
if not os.path.exists(module_package_json_file):
print '[ERROR] Ti+ module "%s" is invalid: missing package.json' % module['id']
sys.exit(1)
module_manifest_file = os.path.join(module_dir, 'manifest')
if not os.path.exists(module_manifest_file):
print '[ERROR] Ti+ module "%s" is invalid: missing manifest' % module['id']
sys.exit(1)
manifest = {}
for line in open(module_manifest_file).readlines():
line = line.strip()
if line[0:1] == '#': continue
if line.find(':') < 0: continue
key,value = line.split(':', 1)
manifest[key.strip()] = value.strip()
if 'minsdk' in manifest and compare_versions(manifest['minsdk'], sdk_version) == 1:
print '[ERROR] Ti+ module "%s" requires a minimum SDK version of %s: current version %s' % (module['id'], manifest['minsdk'], sdk_version)
sys.exit(1)
module_package_json = simplejson.load(codecs.open(module_package_json_file, 'r', 'utf-8'))
main_file = module_package_json['main']
if main_file.endswith('.js'):
main_file = main_file[:-3]
lib = ''
if 'directories' in module_package_json and 'lib' in module_package_json['directories']:
lib = module_package_json['directories']['lib']
if lib.startswith('/'):
lib = lib[1:]
main_file_path = os.path.join(module_dir, lib, main_file + '.js')
if not os.path.exists(main_file_path):
print '[ERROR] Ti+ module "%s" is invalid: missing main "%s"' % (module['id'], main_file_path)
sys.exit(1)
print '[INFO] Bundling Ti+ module "%s" version %s' % (module['id'], manifest['version'])
self.project_dependencies.append(main_file)
module_name = module['id']
if module['id'] != main_file:
module_name += '/' + main_file
if is_commonjs:
self.modules_to_cache.append('commonjs:' + module_name)
else:
self.modules_to_cache.append(module_name)
if not is_commonjs:
self.tiplus_modules_to_load.append(module['id'])
if len(lib):
lib = '/' + lib
self.packages.append({
'name': module['id'],
'location': './' + self.compact_path('modules/' + module['id'] + lib),
'main': main_file,
'root': 1
})
# TODO: need to combine ALL Ti+ module .js files into the titanium.js, not just the main file
# TODO: need to combine ALL Ti+ module .css files into the titanium.css
# copy entire module directory to build directory
shutil.copytree(module_dir, os.path.join(self.build_path, 'modules', module['id']))
# detect circular dependencies
for module in self.modules_to_cache:
if module in self.modules_map:
mydeps = self.modules_map[module]
for dep in mydeps:
if dep in self.modules_map and module in self.modules_map[dep]:
print '[WARN] Circular dependency detected: %s dependent on %s' % (module, dep)
print '[INFO] Found %s dependenc%s, %s package%s, %s module%s' % (
len(self.project_dependencies), 'y' if len(self.project_dependencies) == 1 else 'ies',
len(self.packages), '' if len(self.packages) == 1 else 's',
len(self.modules_to_cache), '' if len(self.modules_to_cache) == 1 else 's')
# TODO: break up the dependencies into layers
# TODO: minify the project's code first
app_names = {}
locales = []
if os.path.exists(self.i18n_path):
print '[INFO] Processing i18n strings...'
for dir in os.listdir(self.i18n_path):
app = self.load_i18n(os.path.join(self.i18n_path, dir, 'app.xml'))
if app is not None and 'appname' in app:
app_names[dir] = app['appname']
strings = self.load_i18n(os.path.join(self.i18n_path, dir, 'strings.xml'))
if strings is not None:
locales.append(dir)
locale_path = os.path.join(self.build_path, 'titanium', 'Ti', 'Locale', dir)
try:
os.makedirs(locale_path)
except:
pass
i18n_file = codecs.open(os.path.join(locale_path, 'i18n.js'), 'w', 'utf-8')
i18n_file.write('define(%s);' % simplejson.dumps(strings))
i18n_file.close()
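# The generated i18n.js is simply an AMD define() of the string table; a
# (hypothetical) strings.xml entry <string name="welcome">Welcome</string>
# would come out as: define({"welcome": "Welcome"});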
if dir in tiapp_xml['precache']['locales']:
self.modules_to_cache.append('Ti/Locale/%s/i18n' % dir)
# build the titanium.js
print '[INFO] Assembling titanium.js...'
ti_js = codecs.open(self.ti_js_file, 'w', 'utf-8')
ti_js.write(HEADER + '\n')
# 1) read in the config.js and fill in the template
enableInstrumentation = tiapp_xml['mobileweb']['instrumentation'] == 'true' if 'instrumentation' in tiapp_xml['mobileweb'] else False
ti_js.write(AppcTemplate(codecs.open(os.path.join(self.sdk_src_path, 'config.js'), 'r', 'utf-8').read(), input_encoding='utf-8', output_encoding='utf-8').render(
app_analytics = tiapp_xml['analytics'],
app_copyright = tiapp_xml['copyright'],
app_description = tiapp_xml['description'],
app_guid = tiapp_xml['guid'],
app_id = tiapp_xml['id'],
app_name = tiapp_xml['name'],
app_names = simplejson.dumps(app_names),
app_publisher = tiapp_xml['publisher'],
app_url = tiapp_xml['url'],
app_version = tiapp_xml['version'],
deploy_type = deploytype,
locales = simplejson.dumps(locales),
packages = simplejson.dumps(self.packages, sort_keys=True),
project_id = tiapp_xml['id'],
project_name = tiapp_xml['name'],
ti_fs_registry = tiapp_xml['mobileweb']['filesystem']['registry'],
ti_theme = theme,
ti_githash = self.package_json['titanium']['githash'],
ti_timestamp = self.package_json['titanium']['timestamp'],
ti_version = sdk_version,
has_analytics_use_xhr = tiapp_xml['mobileweb']['analytics']['use-xhr'],
has_show_errors = 'false' if deploytype == 'production' or tiapp_xml['mobileweb']['disable-error-screen'] == 'true' else 'true',
has_instrumentation = 'true' if enableInstrumentation else 'false',
jsQuoteEscapeFilter = lambda str: str.replace("\\\"","\\\\\\\"")
))
# 2) copy in instrumentation if it's enabled
if enableInstrumentation:
ti_js.write(codecs.open(os.path.join(self.sdk_src_path, 'instrumentation.js'), 'r', 'utf-8').read())
# 3) copy in the loader
ti_js.write(codecs.open(os.path.join(self.sdk_src_path, 'loader.js'), 'r', 'utf-8').read() + '\n')
# 4) cache the dependencies
first = True
require_cache_written = False
module_counter = 0
# uncomment next line to bypass module caching (which is ill advised):
# self.modules_to_cache = {}
for x in self.modules_to_cache:
is_cjs = False
if x.startswith('commonjs:'):
is_cjs = True
x = x[9:]
dep = self.resolve(x, None)
if not len(dep):
continue
if not require_cache_written:
ti_js.write('require.cache({\n');
require_cache_written = True;
if not first:
ti_js.write(',\n')
first = False
module_counter += 1
filename = dep[1]
if not filename.endswith('.js'):
filename += '.js'
file_path = os.path.join(dep[0], filename)
if x.startswith('url:'):
source = file_path + '.uncompressed.js'
if self.minify:
os.rename(file_path, source)
print '[INFO] Minifying include %s' % file_path
p = subprocess.Popen('java -Xms256m -Xmx256m -jar "%s" --compilation_level SIMPLE_OPTIMIZATIONS --js "%s" --js_output_file "%s"' % (os.path.join(self.sdk_path, 'closureCompiler', 'compiler.jar'), source, file_path), shell=True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print '[ERROR] Failed to minify "%s"' % file_path
for line in stderr.split('\n'):
if len(line):
print '[ERROR] %s' % line
print '[WARN] Leaving %s un-minified' % file_path
os.remove(file_path)
shutil.copy(source, file_path)
ti_js.write('"%s":"%s"' % (x, codecs.open(file_path, 'r', 'utf-8').read().strip().replace('\\', '\\\\').replace('\n', '\\n\\\n').replace('\"', '\\\"')))
elif is_cjs:
ti_js.write('"%s":function(){\n/* %s */\ndefine(function(require, exports, module){\n%s\n});\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
else:
ti_js.write('"%s":function(){\n/* %s */\n\n%s\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
image_mime_types = {
'.png': 'image/png',
'.gif': 'image/gif',
'.jpg': 'image/jpg',
'.jpeg': 'image/jpg'
}
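# Precached images end up in the same require.cache() block as inline data URIs,
# e.g. (hypothetical path): "url:/images/logo.png":"data:image/png;base64,iVBOR..."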
for x in precache_images:
x = x.replace('\\', '/')
y = x
if y.startswith(os.sep):
y = '.' + y
img = os.path.join(self.resources_path, os.sep.join(y.split('/')))
if os.path.exists(img):
fname, ext = os.path.splitext(img.lower())
if ext in image_mime_types:
if not require_cache_written:
ti_js.write('require.cache({\n');
require_cache_written = True;
if not first:
ti_js.write(',\n')
first = False
module_counter += 1
ti_js.write('"url:%s":"data:%s;base64,%s"' % (x, image_mime_types[ext], base64.b64encode(open(img,'rb').read())))
if require_cache_written:
ti_js.write('});\n')
# 4) write the ti.app.properties
def addProp(prop, val):
tiapp_xml['properties'][prop] = {
'type': 'string',
'value': val
}
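# addProp just records a string-typed property so the serialization loop below
# picks it up; e.g. addProp('ti.map.apikey', 'abc123') (hypothetical value) is
# emitted into titanium.js as: p.setString("ti.map.apikey","abc123");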
addProp('ti.fs.backend', tiapp_xml['mobileweb']['filesystem']['backend'])
addProp('ti.map.backend', tiapp_xml['mobileweb']['map']['backend'])
addProp('ti.map.apikey', tiapp_xml['mobileweb']['map']['apikey'])
s = ''
for name in tiapp_xml['properties']:
prop = tiapp_xml['properties'][name]
if prop['type'] == 'bool':
s += 'p.setBool("' + name + '",' + prop['value'] + ');\n'
elif prop['type'] == 'int':
s += 'p.setInt("' + name + '",' + prop['value'] + ');\n'
elif prop['type'] == 'double':
s += 'p.setDouble("' + name + '",' + prop['value'] + ');\n'
else:
s += 'p.setString("' + name + '","' + str(prop['value']).replace('"', '\\"') + '");\n'
ti_js.write('require("Ti/App/Properties", function(p) {\n%s});\n' % s)
# 5) write require() to load all Ti modules
self.modules_to_load.sort()
self.modules_to_load += self.tiplus_modules_to_load
ti_js.write('require(%s);\n' % simplejson.dumps(self.modules_to_load))
# 6) close the titanium.js
ti_js.close()
# build the splash screen
splash_html = ''
splash_css = ''
if tiapp_xml['mobileweb']['splash']['enabled'] == 'true':
print '[INFO] Processing splash screen...'
splash_path = os.path.join(self.project_path, 'Resources', 'mobileweb', 'splash')
splash_root_path = os.path.join(self.project_path, 'Resources')
if not os.path.exists(splash_path):
splash_path = os.path.join(self.sdk_path, 'splash')
splash_root_path = splash_path
splash_html_file = os.path.join(splash_path, 'splash.html')
splash_css_file = os.path.join(splash_path, 'splash.css')
if os.path.exists(splash_html_file):
splash_html = codecs.open(splash_html_file, 'r', 'utf-8').read()
if os.path.exists(splash_css_file):
splash_css = codecs.open(splash_css_file, 'r', 'utf-8').read()
if tiapp_xml['mobileweb']['splash']['inline-css-images'] == 'true':
parts = splash_css.split('url(')
for i in range(1, len(parts)):
j = parts[i].find(')')
if j != -1:
img = parts[i][:j].replace('"', '').replace('\'', '').strip()
if img.find('data:') == -1:
if img[0] == '/':
img_path = os.path.join(splash_root_path, img[1:])
else:
img_path = os.path.join(splash_path, img)
if os.path.exists(img_path):
fname, ext = os.path.splitext(img_path.lower())
if ext in image_mime_types:
parts[i] = 'data:%s;base64,%s%s' % (image_mime_types[ext], base64.b64encode(open(img_path,'rb').read()), parts[i][j:])
splash_css = 'url('.join(parts)
# build the titanium.css file
print '[INFO] Assembling titanium.css...'
ti_css = HEADER + '\n' + splash_css + '\n' + codecs.open(os.path.join(self.themes_path, 'common.css'), 'r', 'utf-8').read()
# TODO: need to rewrite absolute paths for urls
# TODO: code below does NOT inline imports, nor remove them... do NOT use imports until themes are fleshed out
if len(theme):
theme_path = os.path.join(self.resources_path, 'themes', theme)
if not os.path.exists(theme_path):
theme_path = os.path.join(self.resources_path, theme)
if not os.path.exists(theme_path):
theme_path = os.path.join(self.themes_path, theme)
if not os.path.exists(theme_path):
print '[ERROR] Unable to locate theme "%s"' % theme
else:
for dirname, dirnames, filenames in os.walk(theme_path):
for filename in filenames:
fname, ext = os.path.splitext(filename.lower())
if ext == '.css':
ti_css += codecs.open(os.path.join(dirname, filename), 'r', 'utf-8').read()
# detect any fonts and add font face rules to the css file
fonts = {}
for dirname, dirnames, filenames in os.walk(self.resources_path):
for filename in filenames:
fname, ext = os.path.splitext(filename.lower())
if ext == '.otf' or ext == '.woff':
if not fname in fonts:
fonts[fname] = []
fonts[fname].append(os.path.join(dirname, filename)[len(self.resources_path):])
for font in fonts:
ti_css += '@font-face{font-family:%s;src:url(%s);}\n' % (font, '),url('.join(fonts[font]))
# minify the css
if self.minify:
ti_css = CSSPacker(ti_css).pack()
# write the titanium.css
ti_css_file = codecs.open(os.path.join(self.build_path, 'titanium.css'), 'w', 'utf-8')
ti_css_file.write(ti_css)
ti_css_file.close()
# minify all javascript, html, and css files
if self.minify:
# TODO: only minify non-project code (i.e. Titanium and Ti+ modules)
subprocess.call('java -Xms256m -Xmx256m -cp "%s%s%s" -Djava.awt.headless=true minify "%s"' % (os.path.join(self.sdk_path, 'minify'), os.pathsep, os.path.join(self.sdk_path, 'closureCompiler', 'compiler.jar'), self.build_path), shell=True)
# elif ext == '.json':
# TODO: minify json
# elif ext == '.css':
# TODO: minify css
# elif ext == '.html':
# TODO: minify html
# create the favicon and apple touch icons
icon_file = os.path.join(self.resources_path, tiapp_xml['icon'])
fname, ext = os.path.splitext(icon_file.lower())
if os.path.exists(icon_file) and (ext == '.png' or ext == '.jpg' or ext == '.gif'):
self.build_icons(icon_file)
else:
icon_file = os.path.join(self.resources_path, 'mobileweb', 'appicon.png')
if os.path.exists(icon_file):
self.build_icons(icon_file)
# create the filesystem registry
print '[INFO] Building filesystem registry...'
filesystem_registry = 'ts\t' + str(int(os.path.getctime(self.build_path)) * 1000) + '\n' + self.walk_fs(self.build_path, 0)
filesystem_registry_file = codecs.open(os.path.join(self.build_path, 'titanium', 'filesystem.registry'), 'w', 'utf-8')
filesystem_registry_file.write(filesystem_registry)
filesystem_registry_file.close()
# if we're preloading the filesystem registry, write it to the require cache
if tiapp_xml['mobileweb']['filesystem']['registry'] == 'preload':
ti_js = codecs.open(self.ti_js_file, 'a', 'utf-8')
ti_js.write('require.cache({"url:/titanium/filesystem.registry":"' + filesystem_registry.strip().replace('\n', '|') + '"});')
ti_js.close()
# get status bar style
status_bar_style = 'default'
if 'statusbar-style' in tiapp_xml:
status_bar_style = tiapp_xml['statusbar-style']
if status_bar_style == 'opaque_black' or status_bar_style == 'opaque':
status_bar_style = 'black'
elif status_bar_style == 'translucent_black' or status_bar_style == 'transparent' or status_bar_style == 'translucent':
status_bar_style = 'black-translucent'
else:
status_bar_style = 'default'
# populate index.html
index_html_file = codecs.open(os.path.join(self.build_path, 'index.html'), 'w', 'utf-8')
index_html_file.write(AppcTemplate(codecs.open(os.path.join(self.sdk_src_path, 'index.html'), 'r', 'utf-8').read().strip(), input_encoding='utf-8', output_encoding='utf-8').render(
ti_header = HTML_HEADER,
project_name = tiapp_xml['name'] or '',
app_description = tiapp_xml['description'] or '',
app_publisher = tiapp_xml['publisher'] or '',
splash_screen = splash_html,
ti_generator = 'Appcelerator Titanium Mobile ' + sdk_version,
ti_statusbar_style = status_bar_style,
ti_css = ti_css,
ti_js = codecs.open(self.ti_js_file, 'r', 'utf-8').read()
))
index_html_file.close()
total_time = round(time.time() - start_time)
total_minutes = math.floor(total_time / 60)
total_seconds = total_time % 60
if total_minutes > 0:
print '[INFO] Finished in %s minutes %s seconds' % (int(total_minutes), int(total_seconds))
else:
print '[INFO] Finished in %s seconds' % int(total_time)
def load_i18n(self, xml_file):
if not os.path.exists(xml_file):
return None
strings = {}
dom = parseString(codecs.open(xml_file, 'r', 'utf-8', 'replace').read().encode('utf-8'))
root = dom.documentElement
for node in root.childNodes:
if node.nodeType == 1 and node.nodeName == 'string':
name = node.getAttribute('name')
if name != '':
val = ''
for inner in node.childNodes:
if inner.nodeType == node.TEXT_NODE:
val = val + inner.data
strings[name] = val.encode('utf-8').decode('string-escape').strip()
return strings
def walk_fs(self, path, depth):
s = ''
listing = os.listdir(path)
listing.sort()
for file in listing:
p = os.path.join(path, file)
# TODO: screen out specific file/folder patterns (i.e. uncompressed js files)
if os.path.isdir(p):
s += ('\t' * depth) + file + '\n' + self.walk_fs(p, depth + 1)
else:
s += ('\t' * depth) + file + '\t' + str(os.path.getsize(p)) + '\n'
return s
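# The registry produced here is a tab-indented listing: a directory appears as a
# bare name followed by its children indented one level deeper, and a file as
# "name<TAB>size". The caller prepends a "ts<TAB><milliseconds>" timestamp line
# (see filesystem_registry below).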
def resolve(self, it, ref):
parts = it.split('!')
it = parts[-1]
if it.startswith('url:'):
it = it[4:]
if it.startswith('/'):
it = '.' + it
parts = it.split('/')
for p in self.packages:
if p['name'] == parts[0]:
return [self.compact_path(os.path.join(self.build_path, p['location'])), it]
return [self.build_path, it]
if it.find(':') != -1:
return []
if it.startswith('/') or (len(parts) == 1 and it.endswith('.js')):
return [self.build_path, it]
if it.startswith('.') and ref is not None:
it = self.compact_path(ref + it)
parts = it.split('/')
for p in self.packages:
if p['name'] == parts[0]:
if p['name'] != 'Ti':
it = it.replace(p['name'] + '/', '')
return [self.compact_path(os.path.join(self.build_path, p['location'])), it]
return [self.build_path, it]
def copy(self, src_path, dest_path, ignore=None):
if os.path.exists(src_path):
print '[INFO] Copying %s...' % src_path
if os.path.isdir(src_path):
for root, dirs, files in os.walk(src_path):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name)
if ignore is not None and root == src_path:
for name in ignore:
if name in dirs:
dirs.remove(name)
for file in files:
if file in ignoreFiles or file.startswith('._'):
continue
source = os.path.join(root, file)
dest = os.path.expanduser(source.replace(src_path, dest_path, 1))
dest_dir = os.path.expanduser(os.path.split(dest)[0])
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(source, dest)
else:
shutil.copy(src_path, dest_path)
def compact_path(self, path):
result = []
lastSegment = ''
path = path.replace('\\', '/').split('/')
while len(path):
segment = path[0]
path = path[1:]
if segment == '..' and len(result) and lastSegment != '..':
result.pop()
lastSegment = result[-1] if len(result) else ''
elif segment != '.':
lastSegment = segment
result.append(segment)
return '/'.join(result)
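# Examples (comments only):
#   self.compact_path('Ti/UI/../Utils')      -> 'Ti/Utils'
#   self.compact_path('./modules/foo/./lib') -> 'modules/foo/lib'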
def build_icons(self, src):
print '[INFO] Generating app icons...'
favicon = os.path.join(self.build_path, 'favicon.png')
s = 'java -Xms256m -Xmx256m -cp "%s%s%s" -Dquiet=true -Djava.awt.headless=true resize "%s"' % (os.path.join(self.sdk_path, 'imageResizer'), os.pathsep, os.path.join(self.sdk_path, 'imageResizer', 'imgscalr-lib-4.2.jar'), src)
s += ' "%s" %d %d' % (favicon, 16, 16)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-precomposed.png'), 57, 57)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-57x57-precomposed.png'), 57, 57)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-72x72-precomposed.png'), 72, 72)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-114x114-precomposed.png'), 114, 114)
subprocess.call(s, shell=True)
os.rename(favicon, os.path.join(self.build_path, 'favicon.ico'))
def load_package_json(self):
package_json_file = os.path.join(self.ti_package_path, 'package.json')
if not os.path.exists(package_json_file):
print '[ERROR] Unable to open titanium package manifest "%s"' % package_json_file
sys.exit(1)
self.package_json = simplejson.load(codecs.open(package_json_file, 'r', 'utf-8'))
def locate_module(self, path):
module_dir = None
module_version = '0.0.0'
if os.path.exists(path):
for dir in os.listdir(path):
mdir = os.path.join(path, dir)
if os.path.isdir(mdir) and compare_versions(module_version, dir) == -1:
module_version = dir
module_dir = mdir
return module_dir
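# E.g. given version directories <path>/1.0/ and <path>/1.2.1/, this returns the
# 1.2.1 directory (highest according to compare_versions); it returns None if the
# path does not exist or contains no subdirectories.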
def find_project_dependencies(self):
print '[INFO] Scanning project for dependencies...'
# TODO: using an AST, scan the entire project's source and identify all dependencies
self.project_dependencies += [
'Ti',
'Ti/Accelerometer',
'Ti/Analytics',
'Ti/API',
'Ti/App',
'Ti/App/Properties',
'Ti/Blob',
'Ti/Buffer',
'Ti/Codec',
'Ti/Facebook',
'Ti/Facebook/LoginButton',
'Ti/Filesystem',
'Ti/Filesystem/File',
'Ti/Filesystem/FileStream',
'Ti/Gesture',
'Ti/_/Gestures/GestureRecognizer',
'Ti/_/Gestures/Dragging',
'Ti/_/Gestures/DoubleTap',
'Ti/_/Gestures/LongPress',
'Ti/_/Gestures/Pinch',
'Ti/_/Gestures/SingleTap',
'Ti/_/Gestures/Swipe',
'Ti/_/Gestures/TouchCancel',
'Ti/_/Gestures/TouchEnd',
'Ti/_/Gestures/TouchMove',
'Ti/_/Gestures/TouchStart',
'Ti/_/Gestures/TwoFingerTap',
'Ti/Geolocation',
'Ti/IOStream',
'Ti/Locale',
'Ti/Media',
'Ti/Media/VideoPlayer',
'Ti/Network',
'Ti/Network/HTTPClient',
'Ti/Platform',
'Ti/Platform/DisplayCaps',
'Ti/Map',
'Ti/Map/View',
'Ti/Map/Annotation',
'Ti/UI',
'Ti/UI/2DMatrix',
'Ti/UI/ActivityIndicator',
'Ti/UI/AlertDialog',
'Ti/UI/Animation',
'Ti/UI/Button',
'Ti/UI/Clipboard',
'Ti/UI/EmailDialog',
'Ti/UI/ImageView',
'Ti/UI/Label',
'Ti/UI/MobileWeb',
'Ti/UI/MobileWeb/NavigationGroup',
'Ti/UI/OptionDialog',
'Ti/UI/Picker',
'Ti/UI/PickerColumn',
'Ti/UI/PickerRow',
'Ti/UI/ProgressBar',
'Ti/UI/ScrollableView',
'Ti/UI/ScrollView',
'Ti/UI/Slider',
'Ti/UI/Switch',
'Ti/UI/Tab',
'Ti/UI/TabGroup',
'Ti/UI/TableView',
'Ti/UI/TableViewRow',
'Ti/UI/TableViewSection',
'Ti/UI/TextArea',
'Ti/UI/TextField',
'Ti/UI/View',
'Ti/UI/WebView',
'Ti/UI/Window',
'Ti/Utils',
'Ti/XML',
'Ti/Yahoo'
]
def parse_deps(self, deps):
found = []
if len(deps) > 2:
deps = deps[1:-1]
deps = deps.split(',')
for dep in deps:
dep = dep.strip().split(' ')[0].strip()
if dep.startswith('\'') or dep.startswith('"'):
found.append(simplejson.loads(dep))
return found
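# E.g. parse_deps('["Ti/_/declare", "Ti/UI/View"]') returns
# ['Ti/_/declare', 'Ti/UI/View']; entries are decoded with simplejson, so the
# dependency names in the define() array are expected to be double-quoted.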
def find_modules_to_cache(self):
print '[INFO] Searching for all required modules...'
self.require_cache = {}
for module in self.project_dependencies:
self.parse_module(module, None)
self.modules_to_cache = []
for module in self.require_cache:
self.modules_to_cache.append(module)
def parse_module(self, module, ref):
if module in self.require_cache or module == 'require':
return
parts = module.split('!')
if len(parts) == 1:
if module.startswith('.') and ref is not None:
module = self.compact_path(ref + module)
self.require_cache[module] = 1
dep = self.resolve(module, ref)
if not len(dep):
return
if len(parts) > 1:
self.require_cache['url:' + parts[1]] = 1
filename = dep[1]
if not filename.endswith('.js'):
filename += '.js'
source = codecs.open(os.path.join(dep[0], filename), 'r', 'utf-8').read()
pattern = re.compile('define\(\s*([\'\"][^\'\"]*[\'\"]\s*)?,?\s*(\[[^\]]+\])\s*?,?\s*(function|\{)')
results = pattern.search(source)
if results is None:
self.modules_map[module] = []
else:
groups = results.groups()
if groups is not None and len(groups):
if groups[1] is None:
self.modules_map[module] = []
else:
deps = self.parse_deps(groups[1])
for i in range(0, len(deps)):
dep = deps[i]
parts = dep.split('!')
ref = module.split('/')
ref.pop()
ref = '/'.join(ref) + '/'
if dep.startswith('.'):
deps[i] = self.compact_path(ref + dep)
if len(parts) == 1:
if dep.startswith('./'):
parts = module.split('/')
parts.pop()
parts.append(dep)
self.parse_module(self.compact_path('/'.join(parts)), ref)
else:
self.parse_module(dep, ref)
else:
self.modules_map[dep] = parts[0]
self.parse_module(parts[0], module)
if parts[0] == 'Ti/_/text':
if dep.startswith('./'):
parts = module.split('/')
parts.pop()
parts.append(dep)
self.parse_module(self.compact_path('/'.join(parts)), ref)
else:
self.parse_module(dep, ref)
self.modules_map[module] = deps
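# Hypothetical invocation (comments only): the class does all of its work in
# __init__, so a build amounts to a single construction such as
#   Compiler('/path/to/project', 'production')
# The command-line entry point that supplies these arguments is not shown here.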